From 23ef07655615222ba4f7d11a6dc622baea0cac85 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 13 Mar 2024 23:26:07 -0300 Subject: [PATCH 01/79] Create new message type for validators to broadcast signature --- .../libs/roles/src/validator/messages/l1_batch_signature.rs | 6 ++++++ node/libs/roles/src/validator/messages/mod.rs | 2 ++ 2 files changed, 8 insertions(+) create mode 100644 node/libs/roles/src/validator/messages/l1_batch_signature.rs diff --git a/node/libs/roles/src/validator/messages/l1_batch_signature.rs b/node/libs/roles/src/validator/messages/l1_batch_signature.rs new file mode 100644 index 00000000..a0f4cf45 --- /dev/null +++ b/node/libs/roles/src/validator/messages/l1_batch_signature.rs @@ -0,0 +1,6 @@ +use crate::validator::Signature; + +/// A message to send by validators to the gossip network. +/// It contains the validators signature to sign the block batches to be sent to L1. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct L1BatchSignature(pub Signature); diff --git a/node/libs/roles/src/validator/messages/mod.rs b/node/libs/roles/src/validator/messages/mod.rs index bcb34689..207d4082 100644 --- a/node/libs/roles/src/validator/messages/mod.rs +++ b/node/libs/roles/src/validator/messages/mod.rs @@ -3,6 +3,7 @@ mod block; mod consensus; mod discovery; +mod l1_batch_signature; mod leader_commit; mod leader_prepare; mod msg; @@ -12,6 +13,7 @@ mod replica_prepare; pub use block::*; pub use consensus::*; pub use discovery::*; +pub use l1_batch_signature::*; pub use leader_commit::*; pub use leader_prepare::*; pub use msg::*; From 93732c4c736528899cabecd93d92aeecbb74ded6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 13 Mar 2024 23:26:38 -0300 Subject: [PATCH 02/79] Add new message to send to the gossip network --- node/libs/roles/src/validator/messages/msg.rs | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs 
index a2da4fa4..e0fa189b 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -1,6 +1,9 @@ //! Generic message types. -use super::{ConsensusMsg, NetAddress}; -use crate::{node::SessionId, validator, validator::Error}; +use super::{ConsensusMsg, L1BatchSignature, NetAddress}; +use crate::{ + node::SessionId, + validator::{self, Error}, +}; use std::fmt; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; @@ -14,6 +17,8 @@ pub enum Msg { SessionId(SessionId), /// validator discovery NetAddress(NetAddress), + /// l1 batch signature + L1BatchSignature(L1BatchSignature), } impl Msg { @@ -59,6 +64,18 @@ impl Variant for NetAddress { } } +impl Variant for L1BatchSignature { + fn insert(self) -> Msg { + Msg::L1BatchSignature(self) + } + fn extract(msg: Msg) -> Result { + let Msg::L1BatchSignature(this) = msg else { + return Err(BadVariantError); + }; + Ok(this) + } +} + /// Hash of a message. 
#[derive(Clone, Copy, PartialEq, Eq)] pub struct MsgHash(pub(crate) keccak256::Keccak256); From cbc7790d1a3ad679f7375fcbd15cd287f45c6f7c Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 13 Mar 2024 23:26:59 -0300 Subject: [PATCH 03/79] Add protobuf related functions for new message --- node/libs/roles/src/proto/validator.proto | 5 +++++ node/libs/roles/src/validator/conv.rs | 25 ++++++++++++++++++++--- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index f1ec2438..33c09ee9 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -144,11 +144,16 @@ message NetAddress { optional std.Timestamp timestamp = 3; // required } +message L1BatchSignature { + optional bytes signature = 1; // required +} + message Msg { oneof t { // required ConsensusMsg consensus = 1; bytes session_id = 2; NetAddress net_address = 3; + L1BatchSignature l1_batch_signature = 4; } } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 04886834..f33ba58d 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,8 +1,9 @@ use super::{ AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg, - FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash, - NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, - ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, + FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, L1BatchSignature, LeaderCommit, + LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, + ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, + ValidatorSet, View, ViewNumber, }; use crate::{node::SessionId, proto::validator as proto}; use anyhow::Context as _; @@ 
-355,6 +356,20 @@ impl ProtoFmt for NetAddress { } } +impl ProtoFmt for L1BatchSignature { + type Proto = proto::L1BatchSignature; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ByteFmt::decode(required(&r.signature)?)?)) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + signature: Some(self.0.encode()), + } + } +} + impl ProtoFmt for Msg { type Proto = proto::Msg; @@ -364,6 +379,9 @@ impl ProtoFmt for Msg { T::Consensus(r) => Self::Consensus(ProtoFmt::read(r).context("Consensus")?), T::SessionId(r) => Self::SessionId(SessionId(r.clone())), T::NetAddress(r) => Self::NetAddress(ProtoFmt::read(r).context("NetAddress")?), + T::L1BatchSignature(r) => { + Self::L1BatchSignature(ProtoFmt::read(r).context("L1BatchSignature")?) + } }) } @@ -374,6 +392,7 @@ impl ProtoFmt for Msg { Self::Consensus(x) => T::Consensus(x.build()), Self::SessionId(x) => T::SessionId(x.0.clone()), Self::NetAddress(x) => T::NetAddress(x.build()), + Self::L1BatchSignature(x) => T::L1BatchSignature(x.build()), }; Self::Proto { t: Some(t) } From a9426230e462bfb6972d94f72792b6ec6cfe0a0e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 19 Mar 2024 16:35:21 -0300 Subject: [PATCH 04/79] Update batch signature message --- node/libs/roles/src/proto/validator.proto | 4 ++-- node/libs/roles/src/validator/conv.rs | 6 +++--- .../roles/src/validator/messages/l1_batch.rs | 20 +++++++++++++++++++ .../validator/messages/l1_batch_signature.rs | 6 ------ node/libs/roles/src/validator/messages/mod.rs | 4 ++-- node/libs/roles/src/validator/messages/msg.rs | 6 +++--- 6 files changed, 30 insertions(+), 16 deletions(-) create mode 100644 node/libs/roles/src/validator/messages/l1_batch.rs delete mode 100644 node/libs/roles/src/validator/messages/l1_batch_signature.rs diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 33c09ee9..909328c0 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ 
-144,7 +144,7 @@ message NetAddress { optional std.Timestamp timestamp = 3; // required } -message L1BatchSignature { +message L1BatchSignatureMsg { optional bytes signature = 1; // required } @@ -153,7 +153,7 @@ message Msg { ConsensusMsg consensus = 1; bytes session_id = 2; NetAddress net_address = 3; - L1BatchSignature l1_batch_signature = 4; + L1BatchSignatureMsg l1_batch_signature = 4; } } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index f33ba58d..e867dc71 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,6 +1,6 @@ use super::{ AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg, - FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, L1BatchSignature, LeaderCommit, + FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, L1BatchSignatureMsg, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, @@ -356,8 +356,8 @@ impl ProtoFmt for NetAddress { } } -impl ProtoFmt for L1BatchSignature { - type Proto = proto::L1BatchSignature; +impl ProtoFmt for L1BatchSignatureMsg { + type Proto = proto::L1BatchSignatureMsg; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self(ByteFmt::decode(required(&r.signature)?)?)) diff --git a/node/libs/roles/src/validator/messages/l1_batch.rs b/node/libs/roles/src/validator/messages/l1_batch.rs new file mode 100644 index 00000000..6ac3ee7f --- /dev/null +++ b/node/libs/roles/src/validator/messages/l1_batch.rs @@ -0,0 +1,20 @@ +use crate::validator::{self, Signature}; + +/// A message to send by validators to the gossip network. +/// It contains the validators signature to sign the block batches to be sent to L1. 
+#[derive(Debug, Clone, Eq, PartialEq)] +pub struct L1BatchSignatureMsg(pub Signature); + +/// A certificate for a batch of L2 blocks to be sent to L1. +/// It contains the signatures of the validators that signed the batch. +#[derive(Debug, Clone, Default, Eq, PartialEq)] +pub struct L1BatchQC { + /// The aggregate signature of the signed L1 batches. + pub signatures: validator::AggregateSignature, +} + +impl L1BatchQC { + pub fn add(&mut self, signature: Signature) { + self.signatures.add(&signature); + } +} diff --git a/node/libs/roles/src/validator/messages/l1_batch_signature.rs b/node/libs/roles/src/validator/messages/l1_batch_signature.rs deleted file mode 100644 index a0f4cf45..00000000 --- a/node/libs/roles/src/validator/messages/l1_batch_signature.rs +++ /dev/null @@ -1,6 +0,0 @@ -use crate::validator::Signature; - -/// A message to send by validators to the gossip network. -/// It contains the validators signature to sign the block batches to be sent to L1. -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct L1BatchSignature(pub Signature); diff --git a/node/libs/roles/src/validator/messages/mod.rs b/node/libs/roles/src/validator/messages/mod.rs index 207d4082..ccf1b436 100644 --- a/node/libs/roles/src/validator/messages/mod.rs +++ b/node/libs/roles/src/validator/messages/mod.rs @@ -3,7 +3,7 @@ mod block; mod consensus; mod discovery; -mod l1_batch_signature; +mod l1_batch; mod leader_commit; mod leader_prepare; mod msg; @@ -13,7 +13,7 @@ mod replica_prepare; pub use block::*; pub use consensus::*; pub use discovery::*; -pub use l1_batch_signature::*; +pub use l1_batch::*; pub use leader_commit::*; pub use leader_prepare::*; pub use msg::*; diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs index e0fa189b..00e08b1d 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -1,5 +1,5 @@ //! Generic message types. 
-use super::{ConsensusMsg, L1BatchSignature, NetAddress}; +use super::{ConsensusMsg, L1BatchSignatureMsg, NetAddress}; use crate::{ node::SessionId, validator::{self, Error}, @@ -18,7 +18,7 @@ pub enum Msg { /// validator discovery NetAddress(NetAddress), /// l1 batch signature - L1BatchSignature(L1BatchSignature), + L1BatchSignature(L1BatchSignatureMsg), } impl Msg { @@ -64,7 +64,7 @@ impl Variant for NetAddress { } } -impl Variant for L1BatchSignature { +impl Variant for L1BatchSignatureMsg { fn insert(self) -> Msg { Msg::L1BatchSignature(self) } From 73122a57d3a2143717a2f36edef30112dfee6581 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 20 Mar 2024 15:38:46 -0300 Subject: [PATCH 05/79] Add new rpc clients for validators to receive signatures --- node/actors/network/src/config.rs | 6 ++ node/actors/network/src/consensus/mod.rs | 58 ++++++++++++++++++- node/actors/network/src/lib.rs | 11 +++- node/actors/network/src/proto/signature.proto | 12 ++++ node/actors/network/src/rpc/mod.rs | 1 + node/actors/network/src/rpc/signature.rs | 49 ++++++++++++++++ 6 files changed, 133 insertions(+), 4 deletions(-) create mode 100644 node/actors/network/src/proto/signature.proto create mode 100644 node/actors/network/src/rpc/signature.rs diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index 27d6c498..bfcfb208 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -18,6 +18,8 @@ pub struct RpcConfig { pub get_block_rate: limiter::Rate, /// Max rate of sending/receiving consensus messages. pub consensus_rate: limiter::Rate, + /// Max rate of sending/receiving l1 batch signature messages. 
+ pub l1_batch_signature_rate: limiter::Rate, } impl Default for RpcConfig { @@ -39,6 +41,10 @@ impl Default for RpcConfig { burst: 10, refresh: time::Duration::ZERO, }, + l1_batch_signature_rate: limiter::Rate { + burst: 10, + refresh: time::Duration::ZERO, + }, } } } diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 32a90540..81b87653 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -1,13 +1,18 @@ //! Consensus network is a full graph of connections between all validators. //! BFT consensus messages are exchanged over this network. -use crate::{config, gossip, io, noise, pool::PoolWatch, preface, rpc}; +use crate::{ + config, gossip, io, noise, + pool::PoolWatch, + preface, + rpc::{self, signature::L1BatchSignatureServer}, +}; use anyhow::Context as _; use std::{ collections::{HashMap, HashSet}, sync::Arc, }; use zksync_concurrency::{ctx, oneshot, scope, sync, time}; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::validator::{self, L1BatchQC}; use zksync_protobuf::kB; mod handshake; @@ -33,6 +38,9 @@ pub(crate) struct Network { pub(crate) outbound: PoolWatch, /// RPC clients for all validators. pub(crate) clients: HashMap>, + /// RPC clients for l1 batch signature. + pub(crate) signature_clients: HashMap>, + pub(crate) l1_batch_qc: L1BatchQC, } #[async_trait::async_trait] @@ -59,6 +67,19 @@ impl rpc::Handler for &Network { } } +#[async_trait::async_trait] +impl rpc::Handler for &L1BatchSignatureServer<'_> { + /// Here we bound the buffering of incoming consensus messages. + fn max_req_size(&self) -> usize { + self.0.gossip.cfg.max_block_size.saturating_add(kB) + } + + async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::signature::Req) -> anyhow::Result<()> { + self.0.l1_batch_qc.clone().add(req.0.sig); + Ok(()) + } +} + impl Network { /// Constructs a new consensus network state. 
pub(crate) fn new(ctx: &ctx::Ctx, gossip: Arc) -> Option> { @@ -66,6 +87,7 @@ impl Network { let validators: HashSet<_> = gossip.genesis().validators.iter().cloned().collect(); Some(Arc::new(Self { key, + l1_batch_qc: L1BatchQC::default(), inbound: PoolWatch::new(validators.clone(), 0), outbound: PoolWatch::new(validators.clone(), 0), clients: validators @@ -77,12 +99,21 @@ impl Network { ) }) .collect(), + signature_clients: validators + .iter() + .map(|peer| { + ( + peer.clone(), + rpc::Client::new(ctx, gossip.cfg.rpc.l1_batch_signature_rate), + ) + }) + .collect(), gossip, })) } /// Sends a message to all validators. - pub(crate) async fn broadcast( + pub(crate) async fn broadcast_consensus_msg( &self, ctx: &ctx::Ctx, msg: validator::Signed, @@ -102,6 +133,27 @@ impl Network { .await } + /// Broadcasts a signature to all validators. + pub(crate) async fn broadcast_signature( + &self, + ctx: &ctx::Ctx, + signature: validator::Signed, + ) -> anyhow::Result<()> { + let req = rpc::signature::Req(signature); + scope::run!(ctx, |ctx, s| async { + for (peer, client) in &self.signature_clients { + s.spawn(async { + if let Err(err) = client.call(ctx, &req, RESP_MAX_SIZE).await { + tracing::info!("send({:?},): {err:#}", &*peer); + } + Ok(()) + }); + } + Ok(()) + }) + .await + } + /// Sends a message to the given validator. pub(crate) async fn send( &self, diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 73c25cb4..090395fa 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -87,7 +87,11 @@ impl Network { io::Target::Validator(key) => { consensus.send(ctx, &key, message.message).await? } - io::Target::Broadcast => consensus.broadcast(ctx, message.message).await?, + io::Target::Broadcast => { + consensus + .broadcast_consensus_msg(ctx, message.message) + .await? 
+ } } } io::InputMessage::SyncBlocks(io::SyncBlocksInputMessage::GetBlock { @@ -102,6 +106,11 @@ impl Network { Err(err) => Err(io::GetBlockError::Internal(err)), }); } + io::InputMessage::L1BatchSignature(message) => { + let consensus = self.consensus.as_ref().context("not a validator node")?; + let ctx = &ctx.with_timeout(CONSENSUS_MSG_TIMEOUT); + consensus.broadcast_signature(ctx, message.message).await?; + } } Ok(()) } diff --git a/node/actors/network/src/proto/signature.proto b/node/actors/network/src/proto/signature.proto new file mode 100644 index 00000000..a0b53625 --- /dev/null +++ b/node/actors/network/src/proto/signature.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package zksync.network.consensus; + +import "zksync/roles/validator.proto"; +import "zksync/std.proto"; + +message SignatureReq { + optional roles.validator.Signed msg = 1; +} + +message SignatureResp {} diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs index 184326cd..e8b01e9b 100644 --- a/node/actors/network/src/rpc/mod.rs +++ b/node/actors/network/src/rpc/mod.rs @@ -27,6 +27,7 @@ mod metrics; pub(crate) mod ping; pub(crate) mod push_block_store_state; pub(crate) mod push_validator_addrs; +pub(crate) mod signature; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] diff --git a/node/actors/network/src/rpc/signature.rs b/node/actors/network/src/rpc/signature.rs new file mode 100644 index 00000000..e6b46c75 --- /dev/null +++ b/node/actors/network/src/rpc/signature.rs @@ -0,0 +1,49 @@ +//! Defines RPC for passing consensus messages. +use crate::{consensus::Network, mux, proto::consensus as proto}; +use zksync_consensus_roles::validator; +use zksync_protobuf::{read_required, ProtoFmt}; + +/// Signature RPC. 
+pub(crate) struct Rpc; + +impl super::Rpc for Rpc { + const CAPABILITY_ID: mux::CapabilityId = 5; + const INFLIGHT: u32 = 1; + const METHOD: &'static str = "signature"; + type Req = Req; + type Resp = (); +} +pub(crate) struct L1BatchSignatureServer<'a>(pub(crate) &'a Network); +/// Signed consensus message that the receiving peer should process. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct Req(pub(crate) validator::Signed); + +/// Confirmation that the signature message has been processed. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct Resp; + +impl ProtoFmt for Req { + type Proto = proto::SignatureReq; + + fn read(r: &Self::Proto) -> anyhow::Result { + read_required(&r.msg).map(Self) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + msg: Some(self.0.build()), + } + } +} + +impl ProtoFmt for Resp { + type Proto = proto::SignatureResp; + + fn read(_r: &Self::Proto) -> anyhow::Result { + Ok(Self) + } + + fn build(&self) -> Self::Proto { + Self::Proto {} + } +} From 1538d0a098ddbd6fe562c2cb30fe459bec6e1a01 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 20 Mar 2024 15:38:59 -0300 Subject: [PATCH 06/79] Add new input message for network actor --- node/actors/network/src/io.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/node/actors/network/src/io.rs b/node/actors/network/src/io.rs index 9747c8ed..54a405f3 100644 --- a/node/actors/network/src/io.rs +++ b/node/actors/network/src/io.rs @@ -10,6 +10,13 @@ pub enum InputMessage { Consensus(ConsensusInputMessage), /// Message types from the Sync Blocks actor. SyncBlocks(SyncBlocksInputMessage), + /// Message of type L1BatchSignatureMsg from the validator. + L1BatchSignature(L1BatchSignatureInputMessage), +} + +#[derive(Debug)] +pub struct L1BatchSignatureInputMessage { + pub message: validator::Signed, } /// Message types from the Consensus actor. 
From 11680fd9ce81a714f44f628aa28e2c0ae61a0ba1 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 9 Apr 2024 10:14:59 -0300 Subject: [PATCH 07/79] Update L1Batch message variant --- node/actors/network/src/consensus/mod.rs | 21 ++++++++++------ node/actors/network/src/io.rs | 6 ++--- node/actors/network/src/lib.rs | 2 +- node/actors/network/src/rpc/signature.rs | 4 +-- node/libs/roles/src/proto/validator.proto | 5 ++-- node/libs/roles/src/validator/conv.rs | 25 ++++++++----------- node/libs/roles/src/validator/messages/msg.rs | 12 ++++----- 7 files changed, 37 insertions(+), 38 deletions(-) diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 81b87653..b61b82b3 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -4,7 +4,7 @@ use crate::{ config, gossip, io, noise, pool::PoolWatch, preface, - rpc::{self, signature::L1BatchSignatureServer}, + rpc::{self, signature::L1BatchServer}, }; use anyhow::Context as _; use std::{ @@ -68,15 +68,20 @@ impl rpc::Handler for &Network { } #[async_trait::async_trait] -impl rpc::Handler for &L1BatchSignatureServer<'_> { +impl rpc::Handler for &L1BatchServer<'_> { /// Here we bound the buffering of incoming consensus messages. 
fn max_req_size(&self) -> usize { self.0.gossip.cfg.max_block_size.saturating_add(kB) } async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::signature::Req) -> anyhow::Result<()> { - self.0.l1_batch_qc.clone().add(req.0.sig); - Ok(()) + let genesis = self.0.gossip.genesis(); + self.0.l1_batch_qc.verify(genesis).unwrap(); + self.0 + .l1_batch_qc + .clone() + .add(req.0.sig.clone(), &req.0, genesis); + return Ok(()); } } @@ -87,7 +92,6 @@ impl Network { let validators: HashSet<_> = gossip.genesis().validators.iter().cloned().collect(); Some(Arc::new(Self { key, - l1_batch_qc: L1BatchQC::default(), inbound: PoolWatch::new(validators.clone(), 0), outbound: PoolWatch::new(validators.clone(), 0), clients: validators @@ -108,7 +112,8 @@ impl Network { ) }) .collect(), - gossip, + gossip: gossip.clone(), + l1_batch_qc: L1BatchQC::new(gossip.genesis().validators.len()), })) } @@ -137,14 +142,14 @@ impl Network { pub(crate) async fn broadcast_signature( &self, ctx: &ctx::Ctx, - signature: validator::Signed, + signature: validator::Signed, ) -> anyhow::Result<()> { let req = rpc::signature::Req(signature); scope::run!(ctx, |ctx, s| async { for (peer, client) in &self.signature_clients { s.spawn(async { if let Err(err) = client.call(ctx, &req, RESP_MAX_SIZE).await { - tracing::info!("send({:?},): {err:#}", &*peer); + tracing::info!("send({:?},): {err:#}", &*peer); } Ok(()) }); diff --git a/node/actors/network/src/io.rs b/node/actors/network/src/io.rs index 54a405f3..b365118b 100644 --- a/node/actors/network/src/io.rs +++ b/node/actors/network/src/io.rs @@ -11,12 +11,12 @@ pub enum InputMessage { /// Message types from the Sync Blocks actor. SyncBlocks(SyncBlocksInputMessage), /// Message of type L1BatchSignatureMsg from the validator. 
- L1BatchSignature(L1BatchSignatureInputMessage), + L1Batch(L1BatchInputMessage), } #[derive(Debug)] -pub struct L1BatchSignatureInputMessage { - pub message: validator::Signed, +pub struct L1BatchInputMessage { + pub message: validator::Signed, } /// Message types from the Consensus actor. diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 090395fa..d783cf8f 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -106,7 +106,7 @@ impl Network { Err(err) => Err(io::GetBlockError::Internal(err)), }); } - io::InputMessage::L1BatchSignature(message) => { + io::InputMessage::L1Batch(message) => { let consensus = self.consensus.as_ref().context("not a validator node")?; let ctx = &ctx.with_timeout(CONSENSUS_MSG_TIMEOUT); consensus.broadcast_signature(ctx, message.message).await?; diff --git a/node/actors/network/src/rpc/signature.rs b/node/actors/network/src/rpc/signature.rs index e6b46c75..024c9751 100644 --- a/node/actors/network/src/rpc/signature.rs +++ b/node/actors/network/src/rpc/signature.rs @@ -13,10 +13,10 @@ impl super::Rpc for Rpc { type Req = Req; type Resp = (); } -pub(crate) struct L1BatchSignatureServer<'a>(pub(crate) &'a Network); +pub(crate) struct L1BatchServer<'a>(pub(crate) &'a Network); /// Signed consensus message that the receiving peer should process. #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct Req(pub(crate) validator::Signed); +pub(crate) struct Req(pub(crate) validator::Signed); /// Confirmation that the signature message has been processed. 
#[derive(Debug, Clone, PartialEq, Eq)] diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 909328c0..fde66016 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -144,8 +144,7 @@ message NetAddress { optional std.Timestamp timestamp = 3; // required } -message L1BatchSignatureMsg { - optional bytes signature = 1; // required +message L1BatchMsg { } message Msg { @@ -153,7 +152,7 @@ message Msg { ConsensusMsg consensus = 1; bytes session_id = 2; NetAddress net_address = 3; - L1BatchSignatureMsg l1_batch_signature = 4; + L1BatchMsg l1_batch = 4; } } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index e867dc71..e9733227 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,9 +1,8 @@ use super::{ AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg, - FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, L1BatchSignatureMsg, LeaderCommit, - LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, - ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, - ValidatorSet, View, ViewNumber, + FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, L1BatchMsg, LeaderCommit, LeaderPrepare, + Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, + ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, }; use crate::{node::SessionId, proto::validator as proto}; use anyhow::Context as _; @@ -356,17 +355,15 @@ impl ProtoFmt for NetAddress { } } -impl ProtoFmt for L1BatchSignatureMsg { - type Proto = proto::L1BatchSignatureMsg; +impl ProtoFmt for L1BatchMsg { + type Proto = proto::L1BatchMsg; - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self(ByteFmt::decode(required(&r.signature)?)?)) + fn read(_r: &Self::Proto) -> 
anyhow::Result { + Ok(Self {}) } fn build(&self) -> Self::Proto { - Self::Proto { - signature: Some(self.0.encode()), - } + Self::Proto {} } } @@ -379,9 +376,7 @@ impl ProtoFmt for Msg { T::Consensus(r) => Self::Consensus(ProtoFmt::read(r).context("Consensus")?), T::SessionId(r) => Self::SessionId(SessionId(r.clone())), T::NetAddress(r) => Self::NetAddress(ProtoFmt::read(r).context("NetAddress")?), - T::L1BatchSignature(r) => { - Self::L1BatchSignature(ProtoFmt::read(r).context("L1BatchSignature")?) - } + T::L1Batch(r) => Self::L1Batch(ProtoFmt::read(r).context("L1Batch")?), }) } @@ -392,7 +387,7 @@ impl ProtoFmt for Msg { Self::Consensus(x) => T::Consensus(x.build()), Self::SessionId(x) => T::SessionId(x.0.clone()), Self::NetAddress(x) => T::NetAddress(x.build()), - Self::L1BatchSignature(x) => T::L1BatchSignature(x.build()), + Self::L1Batch(x) => T::L1Batch(x.build()), }; Self::Proto { t: Some(t) } diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs index 00e08b1d..0137a704 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -1,5 +1,5 @@ //! Generic message types. 
-use super::{ConsensusMsg, L1BatchSignatureMsg, NetAddress}; +use super::{ConsensusMsg, L1BatchMsg, NetAddress}; use crate::{ node::SessionId, validator::{self, Error}, @@ -17,8 +17,8 @@ pub enum Msg { SessionId(SessionId), /// validator discovery NetAddress(NetAddress), - /// l1 batch signature - L1BatchSignature(L1BatchSignatureMsg), + /// l1 batch + L1Batch(L1BatchMsg), } impl Msg { @@ -64,12 +64,12 @@ impl Variant for NetAddress { } } -impl Variant for L1BatchSignatureMsg { +impl Variant for L1BatchMsg { fn insert(self) -> Msg { - Msg::L1BatchSignature(self) + Msg::L1Batch(self) } fn extract(msg: Msg) -> Result { - let Msg::L1BatchSignature(this) = msg else { + let Msg::L1Batch(this) = msg else { return Err(BadVariantError); }; Ok(this) From 94f5b6764ff68f62a6e79fcb988f60f2c6935fc1 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 9 Apr 2024 10:15:14 -0300 Subject: [PATCH 08/79] Add verification for L1 Batch signatures --- .../roles/src/validator/messages/l1_batch.rs | 56 +++++++++++++++++-- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/node/libs/roles/src/validator/messages/l1_batch.rs b/node/libs/roles/src/validator/messages/l1_batch.rs index 6ac3ee7f..b2b1b619 100644 --- a/node/libs/roles/src/validator/messages/l1_batch.rs +++ b/node/libs/roles/src/validator/messages/l1_batch.rs @@ -1,20 +1,64 @@ use crate::validator::{self, Signature}; +use super::{Genesis, Signed}; + /// A message to send by validators to the gossip network. /// It contains the validators signature to sign the block batches to be sent to L1. -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct L1BatchSignatureMsg(pub Signature); +#[derive(Debug, Clone, Eq, PartialEq, Default)] +pub struct L1BatchMsg(); /// A certificate for a batch of L2 blocks to be sent to L1. /// It contains the signatures of the validators that signed the batch.
-#[derive(Debug, Clone, Default, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct L1BatchQC { /// The aggregate signature of the signed L1 batches. - pub signatures: validator::AggregateSignature, + pub signature: validator::AggregateSignature, + /// The validators that signed this message. + pub signers: validator::Signers, + /// The message that was signed. + pub message: L1BatchMsg, +} + +#[derive(Debug)] +pub enum L1BatchQCVerifyError { + BadSignature(validator::Error), } impl L1BatchQC { - pub fn add(&mut self, signature: Signature) { - self.signatures.add(&signature); + pub fn new(validators: usize) -> Self { + Self { + signature: validator::AggregateSignature::default(), + signers: validator::Signers::new(validators), + message: L1BatchMsg::default(), + } + } + + pub fn add(&mut self, signature: Signature, msg: &Signed, genesis: &Genesis) { + if self.message != msg.msg { + return; + }; + let Some(i) = genesis.validators.index(&msg.key) else { + return; + }; + if self.signers.0[i] { + return; + }; + self.signers.0.set(i, true); + self.signature.add(&msg.sig); + } + + /// Verifies the signature of the CommitQC. + pub fn verify(&self, genesis: &Genesis) -> Result<(), L1BatchQCVerifyError> { + // Now we can verify the signature. 
+ let messages_and_keys = genesis + .validators + .iter() + .enumerate() + .filter(|(i, _)| self.signers.0[*i]) + .map(|(_, pk)| (self.message.clone(), pk)); + + self.signature + .verify_messages(messages_and_keys) + .map_err(L1BatchQCVerifyError::BadSignature) } } From cec46c16090ab002f03b13b67f7ba61bf9651883 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 9 Apr 2024 11:04:10 -0300 Subject: [PATCH 09/79] Update network struct for l1 batches rpc --- node/actors/network/src/config.rs | 4 ++-- node/actors/network/src/consensus/mod.rs | 10 ++++++++-- node/libs/roles/src/validator/conv.rs | 2 +- node/libs/roles/src/validator/messages/l1_batch.rs | 6 ++++-- node/libs/roles/src/validator/messages/msg.rs | 2 +- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index d20e8234..12e44363 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -19,7 +19,7 @@ pub struct RpcConfig { /// Max rate of sending/receiving consensus messages. pub consensus_rate: limiter::Rate, /// Max rate of sending/receiving l1 batch signature messages. - pub l1_batch_signature_rate: limiter::Rate, + pub l1_batch_rate: limiter::Rate, } impl Default for RpcConfig { @@ -41,7 +41,7 @@ impl Default for RpcConfig { burst: 10, refresh: time::Duration::ZERO, }, - l1_batch_signature_rate: limiter::Rate { + l1_batch_rate: limiter::Rate { burst: 10, refresh: time::Duration::ZERO, }, diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 77ed96e1..e493f9f2 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -1,5 +1,6 @@ //! Consensus network is a full graph of connections between all validators. //! BFT consensus messages are exchanged over this network. 
+use crate::rpc::signature::L1BatchServer; use crate::rpc::Rpc as _; use crate::{config, gossip, io, noise, pool::PoolWatch, preface, rpc}; use anyhow::Context as _; @@ -28,6 +29,7 @@ pub(crate) struct Connection { #[allow(dead_code)] addr: std::net::SocketAddr, consensus: rpc::Client, + signatures: rpc::Client, } /// Consensus network state. @@ -40,6 +42,7 @@ pub(crate) struct Network { pub(crate) inbound: PoolWatch, /// Set of the currently open outbound connections. pub(crate) outbound: PoolWatch>, + pub(crate) l1_batch_qc: L1BatchQC, } #[async_trait::async_trait] @@ -93,6 +96,7 @@ impl Network { key, inbound: PoolWatch::new(validators.clone(), 0), outbound: PoolWatch::new(validators.clone(), 0), + l1_batch_qc: L1BatchQC::new(validators.len()), gossip, })) } @@ -130,10 +134,11 @@ impl Network { signature: validator::Signed, ) -> anyhow::Result<()> { let req = rpc::signature::Req(signature); + let outbound = self.outbound.current(); scope::run!(ctx, |ctx, s| async { - for (peer, client) in &self.signature_clients { + for (peer, conn) in &outbound { s.spawn(async { - if let Err(err) = client.call(ctx, &req, RESP_MAX_SIZE).await { + if let Err(err) = conn.signatures.call(ctx, &req, RESP_MAX_SIZE).await { tracing::info!("send({:?},): {err:#}", &*peer); } Ok(()) @@ -212,6 +217,7 @@ impl Network { let conn = Arc::new(Connection { addr, consensus: rpc::Client::new(ctx, self.gossip.cfg.rpc.consensus_rate), + signatures: rpc::Client::new(ctx, self.gossip.cfg.rpc.l1_batch_rate), }); self.outbound.insert(peer.clone(), conn.clone()).await?; tracing::info!("outbound connection to {peer:?}"); diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 3665eaec..8ff73525 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,5 +1,5 @@ use super::{ - AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg, + AggregateSignature, BlockHeader, BlockNumber, CommitQC, 
ConsensusMsg, FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, L1BatchMsg, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, diff --git a/node/libs/roles/src/validator/messages/l1_batch.rs b/node/libs/roles/src/validator/messages/l1_batch.rs index b2b1b619..a657fbc4 100644 --- a/node/libs/roles/src/validator/messages/l1_batch.rs +++ b/node/libs/roles/src/validator/messages/l1_batch.rs @@ -19,9 +19,11 @@ pub struct L1BatchQC { pub message: L1BatchMsg, } -#[derive(Debug)] +#[derive(thiserror::Error, Debug)] pub enum L1BatchQCVerifyError { - BadSignature(validator::Error), + /// Bad signature. + #[error("bad signature: {0:#}")] + BadSignature(#[source] anyhow::Error), } impl L1BatchQC { diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs index 3e5fa9b1..8c2da3d5 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -2,7 +2,7 @@ use super::{ConsensusMsg, L1BatchMsg, NetAddress}; use crate::{ node::SessionId, - validator::{self, Error}, + validator::{self}, }; use std::fmt; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; From 5c2639b475a8bca5f1594b03dce9512a2754b7ac Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 10 Apr 2024 17:45:05 +0200 Subject: [PATCH 10/79] introduced msg pool --- node/actors/network/src/consensus/mod.rs | 73 +++++++++++++++++++++--- 1 file changed, 64 insertions(+), 9 deletions(-) diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 7d3242a2..e4ed4e9c 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -4,7 +4,7 @@ use crate::rpc::Rpc as _; use crate::{config, gossip, io, noise, pool::PoolWatch, preface, rpc}; use anyhow::Context as _; use 
rand::seq::SliceRandom; -use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashSet,collections::BTreeMap, sync::Arc}; use tracing::Instrument as _; use zksync_concurrency::{ctx, oneshot, scope, sync, time}; use zksync_consensus_roles::validator; @@ -21,13 +21,66 @@ const RESP_MAX_SIZE: usize = kB; /// is down. const ADDRESS_ANNOUNCER_INTERVAL: time::Duration = time::Duration::minutes(10); -/// Outbound connection state. -pub(crate) struct Connection { - /// Peer's address. - /// This is not used for now, but will be required for the debug page. - #[allow(dead_code)] - addr: std::net::SocketAddr, - consensus: rpc::Client, +type MsgPoolInner = BTreeMap; + +/// Pool of messages to send. +pub(crate) struct MsgPool(sync::watch::Sender); + +pub(crate) struct MsgPoolRecv { + recv: sync::watch::Receiver, + next: usize, +} + +impl MsgPool { + pub fn new() -> Self { + Self(sync::watch::channel(BTreeMap::new())) + } + + pub fn send(&self, msg: ConsensusInputMessage) { + self.0.send_if_modified(|msgs| { + let next = msgs.0.last().unwrap_or(0)+1; + let (k,v) in &msgs.0 { + use validator::ConsensusMsg as M; + if match (&v.message.msg, &msg.message.msg) { + (M::ReplicaPrepare(a),M::ReplicaPrepare(b)) => a.view()>=b.view(), + (M::ReplicaCommit(a),M::ReplicaCommit(b)) => a.view()>=b.view(), + (M::LeaderPrepare(a),M::LeaderPrepare(b)) => a.view()>=b.view(), + (M::LeaderCommit(a),M::LeaderCommit(b)) => a.view()>=b.view(), + _ => continue, + } { + return false; + } + msgs.0.remove(k); + } + msgs.0.insert(next,msg); + true + }); + } + + /// Subscribes to messages in the pool directed to `target`. + pub fn subscribe(&self, target: validator::PublicKey) -> MsgPoolRecv { + MsgPoolRecv { + recv: self.0.subscribe(), + next: 0, + } + } +} + +impl MsgPoolRecv { + pub async fn recv(&mut self, ctx: &ctx::Ctx, target: &validator::PublicKey) -> ctx::OrCanceled { + loop { + for (k,v) in self.recv.borrow().range(self.next..) 
{ + self.next = k+1; + match v.target { + Target::Broadcast => {} + Target::Validator(x) if x==target => {} + _ => continue, + } + return Ok(v.clone()); + } + sync::changed(ctx, &mut self.recv).await?; + } + } } /// Consensus network state. @@ -39,7 +92,9 @@ pub(crate) struct Network { /// Set of the currently open inbound connections. pub(crate) inbound: PoolWatch, /// Set of the currently open outbound connections. - pub(crate) outbound: PoolWatch>, + pub(crate) outbound: PoolWatch, + /// Last messages sent by this node. + pub(crate) msg_pool: MsgPool, } #[async_trait::async_trait] From 592669265d24ac197ee83828363f89792d15ec09 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 11 Apr 2024 12:00:45 +0200 Subject: [PATCH 11/79] retransmission --- node/actors/network/src/consensus/mod.rs | 173 +++++++++++---------- node/actors/network/src/consensus/tests.rs | 21 ++- node/actors/network/src/gossip/runner.rs | 14 +- node/actors/network/src/lib.rs | 19 +-- node/actors/network/src/rpc/mod.rs | 3 +- node/libs/crypto/src/ed25519/tests.rs | 6 +- 6 files changed, 125 insertions(+), 111 deletions(-) diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index e4ed4e9c..e83337e0 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -1,10 +1,12 @@ //! Consensus network is a full graph of connections between all validators. //! BFT consensus messages are exchanged over this network. -use crate::rpc::Rpc as _; use crate::{config, gossip, io, noise, pool::PoolWatch, preface, rpc}; use anyhow::Context as _; use rand::seq::SliceRandom; -use std::{collections::HashSet,collections::BTreeMap, sync::Arc}; +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, +}; use tracing::Instrument as _; use zksync_concurrency::{ctx, oneshot, scope, sync, time}; use zksync_consensus_roles::validator; @@ -21,44 +23,67 @@ const RESP_MAX_SIZE: usize = kB; /// is down. 
const ADDRESS_ANNOUNCER_INTERVAL: time::Duration = time::Duration::minutes(10); -type MsgPoolInner = BTreeMap; +type MsgPoolInner = BTreeMap; /// Pool of messages to send. -pub(crate) struct MsgPool(sync::watch::Sender); +/// It stores the newest message (with the highest view) of each type. +/// Stored messages are available for retransmission in case of reconnect. +pub(crate) struct MsgPool(sync::watch::Sender); +/// Subscriber of the `MsgPool`. It allows to await messages addressed to +/// a specific peer. pub(crate) struct MsgPoolRecv { recv: sync::watch::Receiver, next: usize, } impl MsgPool { - pub fn new() -> Self { - Self(sync::watch::channel(BTreeMap::new())) + /// Constructs an empty `MsgPool`. + pub(crate) fn new() -> Self { + Self(sync::watch::channel(BTreeMap::new()).0) } - pub fn send(&self, msg: ConsensusInputMessage) { + /// Inserts a message to the pool. + pub(crate) fn send(&self, msg: io::ConsensusInputMessage) { self.0.send_if_modified(|msgs| { - let next = msgs.0.last().unwrap_or(0)+1; - let (k,v) in &msgs.0 { + // Select a unique ID for the new message: using `last ID+1` is ok (will NOT cause + // an ID to be reused), because whenever we remove a message, we also insert a message. + let next = msgs.last_key_value().map_or(0, |(k, _)| k + 1); + // We first need to check if `msg` should actually be inserted. + let mut should_insert = true; + // Remove (at most) 1 message of the same type which is older. + msgs.retain(|_, v| { use validator::ConsensusMsg as M; - if match (&v.message.msg, &msg.message.msg) { - (M::ReplicaPrepare(a),M::ReplicaPrepare(b)) => a.view()>=b.view(), - (M::ReplicaCommit(a),M::ReplicaCommit(b)) => a.view()>=b.view(), - (M::LeaderPrepare(a),M::LeaderPrepare(b)) => a.view()>=b.view(), - (M::LeaderCommit(a),M::LeaderCommit(b)) => a.view()>=b.view(), - _ => continue, - } { - return false; + // Messages of other types stay in the pool. 
+ // TODO(gprusak): internals of `ConsensusMsg` are essentially + // an implementation detail of the bft crate. Consider moving + // this logic there. + match (&v.message.msg, &msg.message.msg) { + (M::ReplicaPrepare(_), M::ReplicaPrepare(_)) => {} + (M::ReplicaCommit(_), M::ReplicaCommit(_)) => {} + (M::LeaderPrepare(_), M::LeaderPrepare(_)) => {} + (M::LeaderCommit(_), M::LeaderCommit(_)) => {} + _ => return true, + } + // If pool contains a message of the same type which is newer, + // then our message shouldn't be inserted. + if v.message.msg.view().number >= msg.message.msg.view().number { + should_insert = false; + return true; } - msgs.0.remove(k); + // An older message of the same type should be removed. + false + }); + if should_insert { + msgs.insert(next, msg); } - msgs.0.insert(next,msg); - true + // Notify receivers iff we have actually inserted a message. + should_insert }); } /// Subscribes to messages in the pool directed to `target`. - pub fn subscribe(&self, target: validator::PublicKey) -> MsgPoolRecv { + pub(crate) fn subscribe(&self) -> MsgPoolRecv { MsgPoolRecv { recv: self.0.subscribe(), next: 0, @@ -67,16 +92,21 @@ impl MsgPool { } impl MsgPoolRecv { - pub async fn recv(&mut self, ctx: &ctx::Ctx, target: &validator::PublicKey) -> ctx::OrCanceled { + /// Awaits a message addressed to `peer`. + pub(crate) async fn recv( + &mut self, + ctx: &ctx::Ctx, + peer: &validator::PublicKey, + ) -> ctx::OrCanceled> { loop { - for (k,v) in self.recv.borrow().range(self.next..) { - self.next = k+1; - match v.target { - Target::Broadcast => {} - Target::Validator(x) if x==target => {} + for (k, v) in self.recv.borrow().range(self.next..) 
{ + self.next = k + 1; + match &v.recipient { + io::Target::Broadcast => {} + io::Target::Validator(x) if x == peer => {} _ => continue, } - return Ok(v.clone()); + return Ok(v.message.clone()); } sync::changed(ctx, &mut self.recv).await?; } @@ -93,7 +123,7 @@ pub(crate) struct Network { pub(crate) inbound: PoolWatch, /// Set of the currently open outbound connections. pub(crate) outbound: PoolWatch, - /// Last messages sent by this node. + /// Messages to be sent to validators. pub(crate) msg_pool: MsgPool, } @@ -116,7 +146,9 @@ impl rpc::Handler for &Network { msg: req.0, ack: send, })); - recv.recv_or_disconnected(ctx).await??; + // TODO(gprusak): disconnection means that there message was rejected OR + // that bft actor is missing (in tests), which leads to unnecesary disconnects. + let _ = recv.recv_or_disconnected(ctx).await?; Ok(rpc::consensus::Resp) } } @@ -131,56 +163,13 @@ impl Network { inbound: PoolWatch::new(validators.clone(), 0), outbound: PoolWatch::new(validators.clone(), 0), gossip, + msg_pool: MsgPool::new(), })) } - /// Sends a message to all validators. - pub(crate) async fn broadcast( - &self, - ctx: &ctx::Ctx, - msg: validator::Signed, - ) -> anyhow::Result<()> { - let req = rpc::consensus::Req(msg); - let outbound = self.outbound.current(); - scope::run!(ctx, |ctx, s| async { - for (peer, conn) in &outbound { - s.spawn(async { - if let Err(err) = conn.consensus.call(ctx, &req, RESP_MAX_SIZE).await { - tracing::info!( - "send({:?},{}): {err:#}", - &*peer, - rpc::consensus::Rpc::submethod(&req) - ); - } - Ok(()) - }); - } - Ok(()) - }) - .await - } - - /// Sends a message to the given validator. - pub(crate) async fn send( - &self, - ctx: &ctx::Ctx, - key: &validator::PublicKey, - msg: validator::Signed, - ) -> anyhow::Result<()> { - let outbound = self.outbound.current(); - let req = rpc::consensus::Req(msg); - outbound - .get(key) - .context("not reachable")? 
- .consensus - .call(ctx, &req, RESP_MAX_SIZE) - .await - .with_context(|| rpc::consensus::Rpc::submethod(&req))?; - Ok(()) - } - /// Performs handshake of an inbound stream. /// Closes the stream if there is another inbound stream opened from the same validator. + #[tracing::instrument(level = "info", name = "consensus", skip_all)] pub(crate) async fn run_inbound_stream( &self, ctx: &ctx::Ctx, @@ -189,7 +178,7 @@ impl Network { let peer = handshake::inbound(ctx, &self.key, self.gossip.genesis().hash(), &mut stream).await?; self.inbound.insert(peer.clone(), ()).await?; - tracing::info!("inbound connection from {peer:?}"); + tracing::info!("peer = {peer:?}"); let res = scope::run!(ctx, |ctx, s| async { let mut service = rpc::Service::new() .add_server(rpc::ping::Server, rpc::ping::RATE) @@ -210,6 +199,7 @@ impl Network { res } + #[tracing::instrument(level = "info", name = "consensus", skip_all)] async fn run_outbound_stream( &self, ctx: &ctx::Ctx, @@ -225,16 +215,14 @@ impl Network { peer, ) .await?; - let conn = Arc::new(Connection { - addr, - consensus: rpc::Client::new(ctx, self.gossip.cfg.rpc.consensus_rate), - }); - self.outbound.insert(peer.clone(), conn.clone()).await?; - tracing::info!("outbound connection to {peer:?}"); + self.outbound.insert(peer.clone(), ()).await?; + tracing::info!("peer = {peer:?}"); + let consensus_cli = + rpc::Client::::new(ctx, self.gossip.cfg.rpc.consensus_rate); let res = scope::run!(ctx, |ctx, s| async { let mut service = rpc::Service::new() .add_server(rpc::ping::Server, rpc::ping::RATE) - .add_client(&conn.consensus); + .add_client(&consensus_cli); if let Some(ping_timeout) = &self.gossip.cfg.ping_timeout { let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); service = service.add_client(&ping_client); @@ -243,6 +231,21 @@ impl Network { ping_client.ping_loop(ctx, *ping_timeout).await }); } + s.spawn::<()>(async { + let mut sub = self.msg_pool.subscribe(); + loop { + let call = consensus_cli.reserve(ctx).await?; + let 
msg = sub.recv(ctx, peer).await?; + s.spawn(async { + let req = rpc::consensus::Req(msg); + let res = call.call(ctx, &req, RESP_MAX_SIZE).await; + if let Err(err) = res { + tracing::info!("{err:#}"); + } + Ok(()) + }); + } + }); // If this is a loopback connection, announce periodically the address of this // validator to the network. // Note that this is executed only for outbound end of the loopback connection. @@ -286,7 +289,7 @@ impl Network { format!("{:?} resolved to no addresses", self.gossip.cfg.public_addr) })?; self.run_outbound_stream(ctx, &self.key.public(), addr) - .instrument(tracing::info_span!("{addr}")) + .instrument(tracing::info_span!("loopback", ?addr)) .await } @@ -318,7 +321,7 @@ impl Network { let Some(addr) = addr else { continue }; if let Err(err) = self .run_outbound_stream(ctx, peer, addr) - .instrument(tracing::info_span!("{addr}")) + .instrument(tracing::info_span!("out", ?addr)) .await { tracing::info!("run_outbound_stream({peer:?},{addr}): {err:#}"); diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index 2bb78550..8bf2c2be 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -215,8 +215,13 @@ async fn test_transmission() { for n in &mut nodes { n.wait_for_consensus_connections().await; } - for _ in 0..10 { - let want: validator::Signed = rng.gen(); + for i in 0..10 { + tracing::info!("message {i}"); + // Construct a message and ensure that view is increasing + // (otherwise the message could get filtered out). 
+ let mut want: validator::Signed = rng.gen(); + want.msg.view.number = validator::ViewNumber(i); + let want: validator::Signed = want.cast().unwrap(); let in_message = io::ConsensusInputMessage { message: want.clone(), recipient: io::Target::Validator( @@ -225,14 +230,14 @@ async fn test_transmission() { }; nodes[0].pipe.send(in_message.into()); - let got = loop { + loop { let output_message = nodes[1].pipe.recv(ctx).await.unwrap(); - let io::OutputMessage::Consensus(got) = output_message else { - continue; + if let io::OutputMessage::Consensus(got) = output_message { + assert_eq!(want, got.msg); + tracing::info!("OK"); + break; }; - break got; - }; - assert_eq!(want, got.msg); + } } Ok(()) }) diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index e3568b13..93828db4 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -4,6 +4,7 @@ use anyhow::Context as _; use async_trait::async_trait; use rand::seq::SliceRandom; use std::sync::{atomic::Ordering, Arc}; +use tracing::Instrument as _; use zksync_concurrency::{ctx, net, oneshot, scope, sync}; use zksync_consensus_roles::node; use zksync_consensus_storage::BlockStore; @@ -55,7 +56,9 @@ impl rpc::Handler for PushBlockStoreStateServe response, }; self.net.sender.send(message.into()); - response_receiver.recv_or_disconnected(ctx).await??; + // TODO(gprusak): disconnection means that the message was rejected OR + // that `SyncBlocks` actor is missing (in tests), which leads to unnecesary disconnects. + let _ = response_receiver.recv_or_disconnected(ctx).await?; Ok(()) } } @@ -76,6 +79,7 @@ impl rpc::Handler for &BlockStore { impl Network { /// Manages lifecycle of a single connection. 
+ #[tracing::instrument(level = "info", name = "gossip", skip_all)] async fn run_stream( &self, ctx: &ctx::Ctx, @@ -161,7 +165,7 @@ impl Network { ) -> anyhow::Result<()> { let peer = handshake::inbound(ctx, &self.cfg.gossip, self.genesis().hash(), &mut stream).await?; - tracing::Span::current().record("peer", tracing::field::debug(&peer)); + tracing::info!("peer = {peer:?}"); let conn = Arc::new(Connection { get_block: rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate), }); @@ -184,6 +188,7 @@ impl Network { .context("resolve()")? .choose(&mut ctx.rng()) .with_context(|| "{addr:?} resolved to empty address set")?; + let mut stream = preface::connect(ctx, addr, preface::Endpoint::GossipNet).await?; handshake::outbound( ctx, @@ -197,7 +202,10 @@ impl Network { get_block: rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate), }); self.outbound.insert(peer.clone(), conn.clone()).await?; - let res = self.run_stream(ctx, peer, stream, &conn).await; + let res = self + .run_stream(ctx, peer, stream, &conn) + .instrument(tracing::info_span!("out", ?addr)) + .await; self.outbound.remove(peer).await; res } diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 218e0032..09439b0b 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -74,21 +74,16 @@ impl Network { ctx: &ctx::Ctx, message: io::InputMessage, ) -> anyhow::Result<()> { - /// Timeout for handling a consensus message. - const CONSENSUS_MSG_TIMEOUT: time::Duration = time::Duration::seconds(10); /// Timeout for a GetBlock RPC. const GET_BLOCK_TIMEOUT: time::Duration = time::Duration::seconds(10); match message { io::InputMessage::Consensus(message) => { - let consensus = self.consensus.as_ref().context("not a validator node")?; - let ctx = &ctx.with_timeout(CONSENSUS_MSG_TIMEOUT); - match message.recipient { - io::Target::Validator(key) => { - consensus.send(ctx, &key, message.message).await? 
- } - io::Target::Broadcast => consensus.broadcast(ctx, message.message).await?, - } + self.consensus + .as_ref() + .context("not a validator node")? + .msg_pool + .send(message); } io::InputMessage::SyncBlocks(io::SyncBlocksInputMessage::GetBlock { recipient, @@ -176,7 +171,7 @@ impl Runner { // This is a syscall which should always succeed on a correctly opened socket. let addr = stream.peer_addr().context("peer_addr()")?; let res = async { - tracing::info!("new inbound TCP connection from"); + tracing::info!("new connection"); let (stream, endpoint) = preface::accept(ctx, stream) .await .context("preface::accept()")?; @@ -198,7 +193,7 @@ impl Runner { } anyhow::Ok(()) } - .instrument(tracing::info_span!("{addr}")) + .instrument(tracing::info_span!("in", ?addr)) .await; if let Err(err) = res { tracing::info!("{addr}: {err:#}"); diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs index 1e58ad17..2153189e 100644 --- a/node/actors/network/src/rpc/mod.rs +++ b/node/actors/network/src/rpc/mod.rs @@ -104,7 +104,8 @@ impl<'a, R: Rpc> ReservedCall<'a, R> { let now = ctx.now(); let metric_labels = CallLatencyType::ClientSendRecv.to_labels::(req, &res); RPC_METRICS.latency[&metric_labels].observe_latency(now - send_time); - let (res, msg_size) = res.context(R::METHOD)?; + let (res, msg_size) = + res.with_context(|| format!("{}.{}", R::METHOD, R::submethod(req)))?; RPC_METRICS.message_size[&CallType::RespRecv.to_labels::(req)].observe(msg_size); Ok(res) } diff --git a/node/libs/crypto/src/ed25519/tests.rs b/node/libs/crypto/src/ed25519/tests.rs index e51fbe1a..d1588cbd 100644 --- a/node/libs/crypto/src/ed25519/tests.rs +++ b/node/libs/crypto/src/ed25519/tests.rs @@ -1,5 +1,7 @@ -use crate::ed25519::{PublicKey, SecretKey, Signature}; -use crate::ByteFmt; +use crate::{ + ed25519::{PublicKey, SecretKey, Signature}, + ByteFmt, +}; #[test] fn test_ed25519() -> anyhow::Result<()> { From 36be3daba58703c5639892c2f3a6b037f0654837 Mon Sep 17 00:00:00 
2001 From: Grzegorz Prusak Date: Thu, 11 Apr 2024 12:16:31 +0200 Subject: [PATCH 12/79] typo --- node/actors/network/src/consensus/mod.rs | 2 +- node/actors/network/src/gossip/runner.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index e83337e0..7b4d2578 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -147,7 +147,7 @@ impl rpc::Handler for &Network { ack: send, })); // TODO(gprusak): disconnection means that there message was rejected OR - // that bft actor is missing (in tests), which leads to unnecesary disconnects. + // that bft actor is missing (in tests), which leads to unnecessary disconnects. let _ = recv.recv_or_disconnected(ctx).await?; Ok(rpc::consensus::Resp) } diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 93828db4..da2e5449 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -57,7 +57,7 @@ impl rpc::Handler for PushBlockStoreStateServe }; self.net.sender.send(message.into()); // TODO(gprusak): disconnection means that the message was rejected OR - // that `SyncBlocks` actor is missing (in tests), which leads to unnecesary disconnects. + // that `SyncBlocks` actor is missing (in tests), which leads to unnecessary disconnects. 
let _ = response_receiver.recv_or_disconnected(ctx).await?; Ok(()) } From 1b28fd7058fac642dc4dd8c27caec4887630c61a Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 11 Apr 2024 13:55:57 +0200 Subject: [PATCH 13/79] retransmission test --- node/actors/network/src/consensus/tests.rs | 47 ++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index 8bf2c2be..37c514f8 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -244,3 +244,50 @@ async fn test_transmission() { .await .unwrap(); } + +/// Test that messages are retransmitted when a node gets reconnected. +#[tokio::test] +async fn test_retransmission() { + abort_on_panic(); + // Speed up is needed to make node0 reconnect to node1 fast. + let ctx = &ctx::test_root(&ctx::AffineClock::new(40.)); + let rng = &mut ctx.rng(); + + let setup = validator::testonly::Setup::new(rng, 2); + let cfgs = testonly::new_configs(rng, &setup, 1); + + scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + + // Spawn the first node. + let (node0,runner) = testonly::Instance::new(cfgs[0].clone(), store.clone()); + s.spawn_bg(runner.run(ctx)); + + // Make first node broadcast a message. + let want: validator::Signed = rng.gen(); + node0.pipe.send(io::ConsensusInputMessage { + message: want.clone(), + recipient: io::Target::Broadcast, + }.into()); + + // Spawn the second node multiple times. + // Each time the node should reconnect and re-receive the broadcasted consensus message. 
+ for i in 0..2 { + tracing::info!("iteration {i}"); + scope::run!(ctx, |ctx,s| async { + let (mut node1,runner) = testonly::Instance::new(cfgs[1].clone(), store.clone()); + s.spawn_bg(runner.run(ctx)); + loop { + if let io::OutputMessage::Consensus(got) = node1.pipe.recv(ctx).await.unwrap() { + assert_eq!(want, got.msg); + tracing::info!("OK"); + break; + } + } + Ok(()) + }).await.unwrap(); + } + Ok(()) + }).await.unwrap(); +} From c8d06efc3445cca4929504dfbfd7420418f6f5fd Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 11 Apr 2024 15:37:31 +0200 Subject: [PATCH 14/79] added tests --- node/actors/network/src/consensus/mod.rs | 28 ++--- node/actors/network/src/consensus/tests.rs | 106 ++++++++++++++++-- node/actors/network/src/lib.rs | 2 +- node/actors/network/src/testonly.rs | 28 ++++- node/libs/roles/src/validator/messages/msg.rs | 2 +- 5 files changed, 140 insertions(+), 26 deletions(-) diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 7b4d2578..1acf0c24 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -23,7 +23,7 @@ const RESP_MAX_SIZE: usize = kB; /// is down. const ADDRESS_ANNOUNCER_INTERVAL: time::Duration = time::Duration::minutes(10); -type MsgPoolInner = BTreeMap; +type MsgPoolInner = BTreeMap>; /// Pool of messages to send. /// It stores the newest message (with the highest view) of each type. @@ -44,7 +44,7 @@ impl MsgPool { } /// Inserts a message to the pool. - pub(crate) fn send(&self, msg: io::ConsensusInputMessage) { + pub(crate) fn send(&self, msg: Arc) { self.0.send_if_modified(|msgs| { // Select a unique ID for the new message: using `last ID+1` is ok (will NOT cause // an ID to be reused), because whenever we remove a message, we also insert a message. @@ -92,21 +92,15 @@ impl MsgPool { } impl MsgPoolRecv { - /// Awaits a message addressed to `peer`. + /// Awaits the next message. 
pub(crate) async fn recv( &mut self, ctx: &ctx::Ctx, - peer: &validator::PublicKey, - ) -> ctx::OrCanceled> { + ) -> ctx::OrCanceled> { loop { - for (k, v) in self.recv.borrow().range(self.next..) { + if let Some((k, v)) = self.recv.borrow().range(self.next..).next() { self.next = k + 1; - match &v.recipient { - io::Target::Broadcast => {} - io::Target::Validator(x) if x == peer => {} - _ => continue, - } - return Ok(v.message.clone()); + return Ok(v.clone()); } sync::changed(ctx, &mut self.recv).await?; } @@ -235,7 +229,15 @@ impl Network { let mut sub = self.msg_pool.subscribe(); loop { let call = consensus_cli.reserve(ctx).await?; - let msg = sub.recv(ctx, peer).await?; + let msg = loop { + let msg = sub.recv(ctx).await?; + match &msg.recipient { + io::Target::Broadcast => {} + io::Target::Validator(recipient) if recipient == peer => {} + _ => continue, + } + break msg.message.clone(); + }; s.spawn(async { let req = rpc::consensus::Req(msg); let res = call.call(ctx, &req, RESP_MAX_SIZE).await; diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index 37c514f8..aaf4f21e 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -2,9 +2,90 @@ use super::*; use crate::{io, metrics, preface, rpc, testonly}; use assert_matches::assert_matches; use rand::Rng; +use std::collections::HashSet; use zksync_concurrency::{ctx, net, scope, testonly::abort_on_panic}; use zksync_consensus_roles::validator; use zksync_consensus_storage::testonly::new_store; +use zksync_consensus_utils::enum_util::Variant as _; + +#[tokio::test] +async fn test_msg_pool() { + use validator::ConsensusMsg as M; + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let pool = MsgPool::new(); + + // Generate signed consensus messages of different types and views. 
+ let key: validator::SecretKey = rng.gen(); + let gen = |f: &mut dyn FnMut() -> M| { + let mut x: Vec<_> = (0..5).map(|_| key.sign_msg(f())).collect(); + x.sort_by_key(|m| m.msg.view().number); + x + }; + // We keep them sorted by type and view, so that it is easy to + // compute the expected state of the pool after insertions. + let msgs = [ + gen(&mut || M::ReplicaPrepare(rng.gen())), + gen(&mut || M::ReplicaCommit(rng.gen())), + gen(&mut || M::LeaderPrepare(rng.gen())), + gen(&mut || M::LeaderCommit(rng.gen())), + ]; + + // Insert messages at random. + let mut want = vec![None; msgs.len()]; + for _ in 0..30 { + // Select a random message from `msgs` and insert it. + // Recompute the expected state. + let i = rng.gen_range(0..msgs.len()); + let j = rng.gen_range(0..msgs[i].len()); + want[i] = Some(want[i].unwrap_or(0).max(j)); + pool.send(Arc::new(io::ConsensusInputMessage { + message: msgs[i][j].clone(), + recipient: io::Target::Broadcast, + })); + // Here we compare the internal state of the pool to the expected state. + // Note that we compare sets of crypto hashes of messages, because the messages themselves do not + // implement Hash trait. As a result the error message won't be very helpful. + // If that's problematic, we can either make all the values implement Hash/PartialOrd. 
+ let want: HashSet<_> = want + .iter() + .enumerate() + .filter_map(|(i, j)| j.map(|j| msgs[i][j].msg.clone().insert().hash())) + .collect(); + let mut recv = pool.subscribe(); + let mut got = HashSet::new(); + for _ in 0..want.len() { + got.insert( + recv.recv(ctx) + .await + .unwrap() + .message + .msg + .clone() + .insert() + .hash(), + ); + } + assert_eq!(got, want); + } +} + +#[tokio::test] +async fn test_msg_pool_recv() { + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let mut msgs: Vec = (0..20).map(|_| rng.gen()).collect(); + msgs.sort_by_key(|m| m.message.msg.view().number); + + let pool = MsgPool::new(); + let mut recv = pool.subscribe(); + for m in msgs { + let m = Arc::new(m); + pool.send(m.clone()); + assert_eq!(m, recv.recv(ctx).await.unwrap()); + } +} #[tokio::test] async fn test_one_connection_per_validator() { @@ -261,22 +342,25 @@ async fn test_retransmission() { s.spawn_bg(runner.run(ctx)); // Spawn the first node. - let (node0,runner) = testonly::Instance::new(cfgs[0].clone(), store.clone()); + let (node0, runner) = testonly::Instance::new(cfgs[0].clone(), store.clone()); s.spawn_bg(runner.run(ctx)); // Make first node broadcast a message. let want: validator::Signed = rng.gen(); - node0.pipe.send(io::ConsensusInputMessage { - message: want.clone(), - recipient: io::Target::Broadcast, - }.into()); + node0.pipe.send( + io::ConsensusInputMessage { + message: want.clone(), + recipient: io::Target::Broadcast, + } + .into(), + ); // Spawn the second node multiple times. // Each time the node should reconnect and re-receive the broadcasted consensus message. 
for i in 0..2 { tracing::info!("iteration {i}"); - scope::run!(ctx, |ctx,s| async { - let (mut node1,runner) = testonly::Instance::new(cfgs[1].clone(), store.clone()); + scope::run!(ctx, |ctx, s| async { + let (mut node1, runner) = testonly::Instance::new(cfgs[1].clone(), store.clone()); s.spawn_bg(runner.run(ctx)); loop { if let io::OutputMessage::Consensus(got) = node1.pipe.recv(ctx).await.unwrap() { @@ -286,8 +370,12 @@ async fn test_retransmission() { } } Ok(()) - }).await.unwrap(); + }) + .await + .unwrap(); } Ok(()) - }).await.unwrap(); + }) + .await + .unwrap(); } diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 09439b0b..7fa38c19 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -83,7 +83,7 @@ impl Network { .as_ref() .context("not a validator node")? .msg_pool - .send(message); + .send(Arc::new(message)); } io::InputMessage::SyncBlocks(io::SyncBlocksInputMessage::GetBlock { recipient, diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index 0241ed8e..ef6bdcbf 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -1,7 +1,13 @@ //! Testonly utilities. 
#![allow(dead_code)] -use crate::{Config, GossipConfig, Network, RpcConfig, Runner}; -use rand::Rng; +use crate::{ + io::{ConsensusInputMessage, Target}, + Config, GossipConfig, Network, RpcConfig, Runner, +}; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -11,6 +17,24 @@ use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::BlockStore; use zksync_consensus_utils::pipe; +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Target { + match rng.gen_range(0..2) { + 0 => Target::Broadcast, + _ => Target::Validator(rng.gen()), + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ConsensusInputMessage { + ConsensusInputMessage { + message: rng.gen(), + recipient: rng.gen(), + } + } +} + /// Synchronously forwards data from one stream to another. pub(crate) async fn forward( ctx: &ctx::Ctx, diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs index ea0c2778..cc5a047e 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -60,7 +60,7 @@ impl Variant for NetAddress { } /// Hash of a message. 
-#[derive(Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct MsgHash(pub(crate) keccak256::Keccak256); impl ByteFmt for MsgHash { From 96be90bfdf27d99ab5c69eb97a8c8781c351f340 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Fri, 12 Apr 2024 16:27:41 +0200 Subject: [PATCH 15/79] removed limiting from sync_blocks --- node/actors/sync_blocks/Cargo.toml | 4 +- node/actors/sync_blocks/src/peers/mod.rs | 45 +++---------------- .../actors/sync_blocks/src/peers/tests/mod.rs | 3 +- .../src/peers/tests/multiple_peers.rs | 4 +- 4 files changed, 10 insertions(+), 46 deletions(-) diff --git a/node/actors/sync_blocks/Cargo.toml b/node/actors/sync_blocks/Cargo.toml index 38007153..b478460d 100644 --- a/node/actors/sync_blocks/Cargo.toml +++ b/node/actors/sync_blocks/Cargo.toml @@ -14,15 +14,15 @@ zksync_consensus_storage.workspace = true zksync_consensus_utils.workspace = true anyhow.workspace = true +rand.workspace = true thiserror.workspace = true tracing.workspace = true [dev-dependencies] assert_matches.workspace = true async-trait.workspace = true -rand.workspace = true test-casing.workspace = true tokio.workspace = true [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs index bc589ccb..a3a7cafc 100644 --- a/node/actors/sync_blocks/src/peers/mod.rs +++ b/node/actors/sync_blocks/src/peers/mod.rs @@ -1,4 +1,5 @@ //! Peer states tracked by the `SyncBlocks` actor. 
+#![allow(unused)] use self::events::PeerStateEvent; use crate::{io, Config}; use anyhow::Context as _; @@ -142,13 +143,11 @@ impl PeerStates { number: BlockNumber, ) -> ctx::OrCanceled { while ctx.is_active() { - let Some((peer, permit)) = self.try_acquire_peer_permit(number) else { - let sleep_interval = self.config.sleep_interval_for_get_block; - ctx.sleep(sleep_interval).await?; + let Some(peer) = self.select_peer(number) else { + ctx.sleep(self.config.sleep_interval_for_get_block).await?; continue; }; let res = self.fetch_block_from_peer(ctx, &peer, number).await; - drop(permit); match res { Ok(block) => { if let Some(send) = &self.events_sender { @@ -204,43 +203,9 @@ impl PeerStates { Ok(block) } - fn try_acquire_peer_permit( - &self, - block_number: BlockNumber, - ) -> Option<(node::PublicKey, sync::OwnedSemaphorePermit)> { + fn select_peer(&self, block_number: BlockNumber) -> Option { let peers = self.peers.lock().unwrap(); - let mut peers_with_no_permits = vec![]; - let eligible_peers_info = peers.iter().filter(|(peer_key, state)| { - if !state.state.contains(block_number) { - return false; - } - let available_permits = state.get_block_semaphore.available_permits(); - // ^ `available_permits()` provides a lower bound on the actual number of available permits. - // Some permits may be released before acquiring a new permit below, but no other permits - // are acquired since we hold an exclusive lock on `peers`. 
- if available_permits == 0 { - peers_with_no_permits.push(*peer_key); - } - available_permits > 0 - }); - let peer_to_query = eligible_peers_info - .max_by_key(|(_, state)| state.get_block_semaphore.available_permits()); - - if let Some((peer_key, state)) = peer_to_query { - let permit = state - .get_block_semaphore - .clone() - .try_acquire_owned() - .unwrap(); - // ^ `unwrap()` is safe for the reasons described in the above comment - Some((peer_key.clone(), permit)) - } else { - tracing::debug!( - ?peers_with_no_permits, - "No peers to query block #{block_number}" - ); - None - } + peers.iter().filter(|(_,s)|s.state.contains(block_number)).next().map(|x|x.0.clone()) } /// Drops peer state. diff --git a/node/actors/sync_blocks/src/peers/tests/mod.rs b/node/actors/sync_blocks/src/peers/tests/mod.rs index ee28e436..86181da8 100644 --- a/node/actors/sync_blocks/src/peers/tests/mod.rs +++ b/node/actors/sync_blocks/src/peers/tests/mod.rs @@ -139,8 +139,7 @@ async fn test_try_acquire_peer_permit() { peer_states.update(&peer, s.clone()).unwrap(); for block in b { let got = peer_states - .try_acquire_peer_permit(block.number()) - .map(|p| p.0); + .select_peer(block.number()); if s.first <= block.number() && s.last .as_ref() diff --git a/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs b/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs index 79842a48..4281c69c 100644 --- a/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs +++ b/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs @@ -146,10 +146,10 @@ impl Test for RequestingBlocksFromTwoPeers { } } -#[tokio::test] +/*#[tokio::test] async fn requesting_blocks_from_two_peers() { test_peer_states(RequestingBlocksFromTwoPeers).await; -} +}*/ #[derive(Debug, Clone, Copy)] struct PeerBehavior { From 16693df88db9e123e9b4a74c8a2883051165a40a Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Fri, 12 Apr 2024 16:42:25 +0200 Subject: [PATCH 16/79] cargo fmt --- 
node/actors/sync_blocks/src/peers/mod.rs | 4 +++- node/actors/sync_blocks/src/peers/tests/mod.rs | 3 +-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs index a3a7cafc..4861a2e1 100644 --- a/node/actors/sync_blocks/src/peers/mod.rs +++ b/node/actors/sync_blocks/src/peers/mod.rs @@ -205,7 +205,9 @@ impl PeerStates { fn select_peer(&self, block_number: BlockNumber) -> Option { let peers = self.peers.lock().unwrap(); - peers.iter().filter(|(_,s)|s.state.contains(block_number)).next().map(|x|x.0.clone()) + peers + .iter().find(|(_, s)| s.state.contains(block_number)) + .map(|x| x.0.clone()) } /// Drops peer state. diff --git a/node/actors/sync_blocks/src/peers/tests/mod.rs b/node/actors/sync_blocks/src/peers/tests/mod.rs index 86181da8..01120c5b 100644 --- a/node/actors/sync_blocks/src/peers/tests/mod.rs +++ b/node/actors/sync_blocks/src/peers/tests/mod.rs @@ -138,8 +138,7 @@ async fn test_try_acquire_peer_permit() { ] { peer_states.update(&peer, s.clone()).unwrap(); for block in b { - let got = peer_states - .select_peer(block.number()); + let got = peer_states.select_peer(block.number()); if s.first <= block.number() && s.last .as_ref() From c9935c0fa69cde357a3d6f5eca148962dd3313e1 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Fri, 12 Apr 2024 16:49:25 +0200 Subject: [PATCH 17/79] cargo fmt --- node/actors/sync_blocks/src/peers/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs index 4861a2e1..713fc681 100644 --- a/node/actors/sync_blocks/src/peers/mod.rs +++ b/node/actors/sync_blocks/src/peers/mod.rs @@ -206,7 +206,8 @@ impl PeerStates { fn select_peer(&self, block_number: BlockNumber) -> Option { let peers = self.peers.lock().unwrap(); peers - .iter().find(|(_, s)| s.state.contains(block_number)) + .iter() + .find(|(_, s)| s.state.contains(block_number)) 
.map(|x| x.0.clone()) } From 96a50de0886d96b5598a9a5529ee51541bc3a2db Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 15 Apr 2024 13:22:50 -0300 Subject: [PATCH 18/79] Fix clippy lints --- node/actors/network/src/consensus/mod.rs | 6 ++---- node/actors/network/src/rpc/signature.rs | 2 ++ node/libs/roles/src/validator/messages/l1_batch.rs | 12 ++++++++---- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index e493f9f2..6835133a 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -42,6 +42,7 @@ pub(crate) struct Network { pub(crate) inbound: PoolWatch, /// Set of the currently open outbound connections. pub(crate) outbound: PoolWatch>, + /// L1 batch QC. pub(crate) l1_batch_qc: L1BatchQC, } @@ -79,10 +80,7 @@ impl rpc::Handler for &L1BatchServer<'_> { async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::signature::Req) -> anyhow::Result<()> { let genesis = self.0.gossip.genesis(); self.0.l1_batch_qc.verify(genesis).unwrap(); - self.0 - .l1_batch_qc - .clone() - .add(req.0.sig.clone(), &req.0, genesis); + self.0.l1_batch_qc.clone().add(&req.0, genesis); return Ok(()); } } diff --git a/node/actors/network/src/rpc/signature.rs b/node/actors/network/src/rpc/signature.rs index 024c9751..34e2b327 100644 --- a/node/actors/network/src/rpc/signature.rs +++ b/node/actors/network/src/rpc/signature.rs @@ -13,6 +13,8 @@ impl super::Rpc for Rpc { type Req = Req; type Resp = (); } + +/// RPC server for the L1 batch messages. pub(crate) struct L1BatchServer<'a>(pub(crate) &'a Network); /// Signed consensus message that the receiving peer should process. 
 #[derive(Debug, Clone, PartialEq, Eq)]
diff --git a/node/libs/roles/src/validator/messages/l1_batch.rs b/node/libs/roles/src/validator/messages/l1_batch.rs
index a657fbc4..ae23b1a0 100644
--- a/node/libs/roles/src/validator/messages/l1_batch.rs
+++ b/node/libs/roles/src/validator/messages/l1_batch.rs
@@ -1,11 +1,11 @@
-use crate::validator::{self, Signature};
+use crate::validator::{self};

 use super::{Genesis, Signed};

 /// A message to send by validators to the gossip network.
 /// It contains the validators signature to sign the block batches to be sent to L1.
 #[derive(Debug, Clone, Eq, PartialEq, Default)]
-pub struct L1BatchMsg();
+pub struct L1BatchMsg;

 /// A certificate for a batch of L2 blocks to be sent to L1.
 /// It contains the signatures of the validators that signed the batch.
@@ -19,6 +19,7 @@ pub struct L1BatchQC {
     pub message: L1BatchMsg,
 }

+/// Error returned by `L1BatchQC::verify()` if the signature is invalid.
 #[derive(thiserror::Error, Debug)]
 pub enum L1BatchQCVerifyError {
     /// Bad signature.
@@ -27,15 +28,18 @@ pub enum L1BatchQCVerifyError {
 }

 impl L1BatchQC {
+    /// Create a new empty instance for a given validator set size.
     pub fn new(validators: usize) -> Self {
         Self {
             signature: validator::AggregateSignature::default(),
             signers: validator::Signers::new(validators),
-            message: L1BatchMsg::default(),
+            message: L1BatchMsg,
         }
     }

-    pub fn add(&mut self, signature: Signature, msg: &Signed, genesis: &Genesis) {
+    /// Add an attester's signature.
+    /// Signature is assumed to be already verified.
+    pub fn add(&mut self, msg: &Signed, genesis: &Genesis) {
         if self.message != msg.msg {
             return;
         };

From 1dcda70c1c25d0e4db6781ba8d2645d7e8966d49 Mon Sep 17 00:00:00 2001
From: pompon0
Date: Wed, 17 Apr 2024 12:34:32 +0200
Subject: [PATCH 19/79] Added support for persisting blocks in parallel (#98)

Statekeeper is storing blocks to postgres in parallel to processing
next blocks.
Our simplified API so far required persisting one block to finish before processing the next block. This effectively disabled the parallelization in statekeeper and therefore the EN with consensus enabled was significantly slower than EN without consensus. This PR updates the api to allow persisting multiple blocks in parallel. Long term this change most likely won't be that useful, because statekeeper of a validator node will have to work synchronously with consensus (but that requires improving statekeeper anyway). I've also implemented caching inmemory the most recent blocks (even if they got persisted) to address the inefficiency found during the loadtest. --- node/.config/nextest.toml | 1 + node/actors/bft/src/replica/block.rs | 3 +- node/actors/bft/src/replica/leader_prepare.rs | 2 +- node/actors/bft/src/replica/tests.rs | 3 +- node/actors/bft/src/testonly/run.rs | 14 +- node/actors/executor/src/tests.rs | 49 ++-- node/actors/network/src/gossip/runner.rs | 14 +- node/actors/network/src/lib.rs | 14 +- node/actors/sync_blocks/src/peers/mod.rs | 2 +- .../sync_blocks/src/tests/end_to_end.rs | 4 +- node/libs/concurrency/src/net/mod.rs | 9 +- node/libs/storage/src/block_store/metrics.rs | 2 +- node/libs/storage/src/block_store/mod.rs | 210 ++++++++++-------- node/libs/storage/src/testonly/in_memory.rs | 38 ++-- node/libs/storage/src/testonly/mod.rs | 4 +- node/libs/storage/src/tests.rs | 15 +- .../src/rpc/methods/last_committed_block.rs | 3 +- node/tools/src/rpc/methods/last_view.rs | 5 +- node/tools/src/store.rs | 50 +++-- node/tools/src/tests.rs | 7 +- 20 files changed, 244 insertions(+), 205 deletions(-) diff --git a/node/.config/nextest.toml b/node/.config/nextest.toml index c992c4de..ae557799 100644 --- a/node/.config/nextest.toml +++ b/node/.config/nextest.toml @@ -20,6 +20,7 @@ final-status-level = "slow" # Profile for CI runs. 
[profile.ci] +slow-timeout = { period = "60s", terminate-after = 2, grace-period = "0s" } # "retries" defines the number of times a test should be retried. If set to a # non-zero value, tests that succeed on a subsequent attempt will be marked as # non-flaky. Can be overridden through the `--retries` option. diff --git a/node/actors/bft/src/replica/block.rs b/node/actors/bft/src/replica/block.rs index 20ca6f3b..7834b5e7 100644 --- a/node/actors/bft/src/replica/block.rs +++ b/node/actors/bft/src/replica/block.rs @@ -1,5 +1,4 @@ use super::StateMachine; -use tracing::instrument; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; @@ -7,7 +6,7 @@ impl StateMachine { /// Tries to build a finalized block from the given CommitQC. We simply search our /// block proposal cache for the matching block, and if we find it we build the block. /// If this method succeeds, it sends the finalized block to the executor. - #[instrument(level = "debug", skip(self), ret)] + #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn save_block( &mut self, ctx: &ctx::Ctx, diff --git a/node/actors/bft/src/replica/leader_prepare.rs b/node/actors/bft/src/replica/leader_prepare.rs index c4d55de8..75a66b6b 100644 --- a/node/actors/bft/src/replica/leader_prepare.rs +++ b/node/actors/bft/src/replica/leader_prepare.rs @@ -115,7 +115,7 @@ impl StateMachine { // (because it won't be able to persist and broadcast them once finalized). // TODO(gprusak): it should never happen, we should add safety checks to prevent // pruning blocks not known to be finalized. 
- if message.proposal.number < self.config.block_store.subscribe().borrow().first { + if message.proposal.number < self.config.block_store.queued().first { return Err(Error::ProposalAlreadyPruned); } diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index c1b5be08..ca720a36 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -177,8 +177,7 @@ async fn leader_prepare_pruned_block() { .replica .config .block_store - .subscribe() - .borrow() + .queued() .first .prev() .unwrap(); diff --git a/node/actors/bft/src/testonly/run.rs b/node/actors/bft/src/testonly/run.rs index af06ea04..a1899bee 100644 --- a/node/actors/bft/src/testonly/run.rs +++ b/node/actors/bft/src/testonly/run.rs @@ -1,7 +1,7 @@ use super::{Behavior, Node}; use std::collections::HashMap; use tracing::Instrument as _; -use zksync_concurrency::{ctx, oneshot, scope, sync}; +use zksync_concurrency::{ctx, oneshot, scope}; use zksync_consensus_network as network; use zksync_consensus_roles::validator; use zksync_consensus_storage::testonly::new_store; @@ -46,18 +46,16 @@ impl Test { s.spawn_bg(run_nodes(ctx, self.network, &nodes)); // Run the nodes until all honest nodes store enough finalized blocks. + assert!(self.blocks_to_finalize > 0); let first = setup.genesis.fork.first_block; - let want_next = validator::BlockNumber(first.0 + self.blocks_to_finalize as u64); + let last = first + (self.blocks_to_finalize as u64 - 1); for store in &honest { - sync::wait_for(ctx, &mut store.subscribe(), |state| { - state.next() > want_next - }) - .await?; + store.wait_until_queued(ctx, last).await?; } // Check that the stored blocks are consistent. - for i in 0..self.blocks_to_finalize as u64 + 1 { - let i = validator::BlockNumber(i); + for i in 0..self.blocks_to_finalize as u64 { + let i = first + i; let want = honest[0].block(ctx, i).await?; for store in &honest[1..] 
{ assert_eq!(want, store.block(ctx, i).await?); diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index 7ee8394d..ebae04fb 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -1,5 +1,6 @@ //! High-level tests for `Executor`. use super::*; +use tracing::Instrument as _; use zksync_concurrency::testonly::abort_on_panic; use zksync_consensus_bft as bft; use zksync_consensus_network::testonly::{new_configs, new_fullnode}; @@ -105,19 +106,17 @@ async fn test_validator_syncing_from_fullnode() { let setup = Setup::new(rng, 1); let cfgs = new_configs(rng, &setup, 0); scope::run!(ctx, |ctx, s| async { - // Spawn full node. - let (node_store, runner) = new_store(ctx, &setup.genesis).await; - s.spawn_bg(runner.run(ctx)); - s.spawn_bg(fullnode(&new_fullnode(rng, &cfgs[0]), node_store.clone()).run(ctx)); - // Run validator and produce some blocks. - // Wait for the blocks to be fetched by the full node. let replica_store = in_memory::ReplicaStore::default(); + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.genesis).await; - s.spawn_bg(runner.run(ctx)); - s.spawn_bg(validator(&cfgs[0], store, replica_store.clone()).run(ctx)); - node_store + s.spawn_bg( + validator(&cfgs[0], store.clone(), replica_store.clone()) + .run(ctx) + .instrument(tracing::info_span!("validator")), + ); + store .wait_until_persisted(ctx, setup.genesis.fork.first_block + 4) .await?; Ok(()) @@ -125,23 +124,31 @@ async fn test_validator_syncing_from_fullnode() { .await .unwrap(); + // Spawn full node with storage used previously by validator -this ensures that + // all finalized blocks are available: if we ran a fullnode in parallel to the + // validator, there would be a race condition between fullnode syncing and validator + // terminating. 
+ s.spawn_bg( + fullnode(&new_fullnode(rng, &cfgs[0]), store.clone()) + .run(ctx) + .instrument(tracing::info_span!("fullnode")), + ); + // Restart the validator with empty store (but preserved replica state) and non-trivial // `store.state.first`. // Validator should fetch the past blocks from the full node before producing next blocks. - let last_block = node_store - .subscribe() - .borrow() - .last - .as_ref() - .unwrap() - .header() - .number; - let (store, runner) = + let last_block = store.queued().last.as_ref().unwrap().header().number; + let (store2, runner) = new_store_with_first(ctx, &setup.genesis, setup.genesis.fork.first_block + 2).await; s.spawn_bg(runner.run(ctx)); - s.spawn_bg(validator(&cfgs[0], store, replica_store).run(ctx)); - node_store.wait_until_persisted(ctx, last_block + 3).await?; + s.spawn_bg( + validator(&cfgs[0], store2, replica_store) + .run(ctx) + .instrument(tracing::info_span!("validator")), + ); + // Wait for the fullnode to fetch the new blocks. + store.wait_until_persisted(ctx, last_block + 3).await?; Ok(()) }) .await diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index da2e5449..f810d1a6 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -79,7 +79,6 @@ impl rpc::Handler for &BlockStore { impl Network { /// Manages lifecycle of a single connection. - #[tracing::instrument(level = "info", name = "gossip", skip_all)] async fn run_stream( &self, ctx: &ctx::Ctx, @@ -124,12 +123,14 @@ impl Network { // Push block store state updates to peer. 
s.spawn::<()>(async { - let mut sub = self.block_store.subscribe(); - sub.mark_changed(); + let mut state = self.block_store.queued(); loop { - let state = sync::changed(ctx, &mut sub).await?.clone(); - let req = rpc::push_block_store_state::Req(state); + let req = rpc::push_block_store_state::Req(state.clone()); push_block_store_state_client.call(ctx, &req, kB).await?; + state = self + .block_store + .wait_until_queued(ctx, state.next()) + .await?; } }); @@ -158,6 +159,7 @@ impl Network { /// Handles an inbound stream. /// Closes the stream if there is another inbound stream opened from the same peer. + #[tracing::instrument(level = "info", name = "gossip", skip_all)] pub(crate) async fn run_inbound_stream( &self, ctx: &ctx::Ctx, @@ -176,6 +178,7 @@ impl Network { } /// Connects to a peer and handles the resulting stream. + #[tracing::instrument(level = "info", name = "gossip", skip_all)] pub(crate) async fn run_outbound_stream( &self, ctx: &ctx::Ctx, @@ -198,6 +201,7 @@ impl Network { peer, ) .await?; + tracing::info!("peer = {peer:?}"); let conn = Arc::new(Connection { get_block: rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate), }); diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 7fa38c19..ed154333 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -130,19 +130,19 @@ impl Runner { // Maintain static gossip connections. 
for (peer, addr) in &self.net.gossip.cfg.gossip.static_outbound { - s.spawn(async { + s.spawn::<()>(async { + let addr = &*addr; loop { - let run_result = self + let res = self .net .gossip .run_outbound_stream(ctx, peer, addr.clone()) + .instrument(tracing::info_span!("out", ?addr)) .await; - if let Err(err) = run_result { - tracing::info!("gossip.run_outbound_stream(): {err:#}"); - } - if let Err(ctx::Canceled) = ctx.sleep(CONNECT_RETRY).await { - return Ok(()); + if let Err(err) = res { + tracing::info!("gossip.run_outbound_stream({addr:?}): {err:#}"); } + ctx.sleep(CONNECT_RETRY).await?; } }); } diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs index 713fc681..6c838a1c 100644 --- a/node/actors/sync_blocks/src/peers/mod.rs +++ b/node/actors/sync_blocks/src/peers/mod.rs @@ -100,7 +100,7 @@ impl PeerStates { pub(crate) async fn run_block_fetcher(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { let sem = sync::Semaphore::new(self.config.max_concurrent_blocks); scope::run!(ctx, |ctx, s| async { - let mut next = self.storage.subscribe().borrow().next(); + let mut next = self.storage.queued().next(); let mut highest_peer_block = self.highest_peer_block.subscribe(); loop { sync::wait_for(ctx, &mut highest_peer_block, |highest_peer_block| { diff --git a/node/actors/sync_blocks/src/tests/end_to_end.rs b/node/actors/sync_blocks/src/tests/end_to_end.rs index 77e3affd..0a0f1849 100644 --- a/node/actors/sync_blocks/src/tests/end_to_end.rs +++ b/node/actors/sync_blocks/src/tests/end_to_end.rs @@ -192,7 +192,7 @@ impl GossipNetworkTest for BasicSynchronization { tracing::info!("Check initial node states"); for node in &nodes { node.start(); - let state = node.store.subscribe().borrow().clone(); + let state = node.store.queued(); assert_eq!(state.first, setup.genesis.fork.first_block); assert_eq!(state.last, None); } @@ -372,7 +372,7 @@ async fn test_different_first_block() { // Find nodes interested in the next block. 
let interested_nodes: Vec<_> = nodes .iter() - .filter(|n| n.store.subscribe().borrow().first <= block.number()) + .filter(|n| n.store.queued().first <= block.number()) .collect(); // Store this block to one of them. if let Some(node) = interested_nodes.choose(rng) { diff --git a/node/libs/concurrency/src/net/mod.rs b/node/libs/concurrency/src/net/mod.rs index 8a507737..07a2b710 100644 --- a/node/libs/concurrency/src/net/mod.rs +++ b/node/libs/concurrency/src/net/mod.rs @@ -1,6 +1,7 @@ //! Context-aware network utilities. //! Built on top of `tokio::net`. use crate::ctx; +use std::fmt; pub mod tcp; @@ -11,9 +12,15 @@ mod tests; /// NOT VALIDATED, validation happens at `Host::resolve()` call. // TODO: for better type safety consider verifying host to be in the valid // format in constructor. -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] pub struct Host(pub String); +impl fmt::Debug for Host { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(fmt) + } +} + impl From for Host { fn from(addr: std::net::SocketAddr) -> Self { Self(addr.to_string()) diff --git a/node/libs/storage/src/block_store/metrics.rs b/node/libs/storage/src/block_store/metrics.rs index 05b64133..34954f69 100644 --- a/node/libs/storage/src/block_store/metrics.rs +++ b/node/libs/storage/src/block_store/metrics.rs @@ -15,7 +15,7 @@ pub(super) struct PersistentBlockStore { pub(super) block_latency: vise::Histogram, /// Latency of a successful `store_next_block()` call. #[metrics(unit = vise::Unit::Seconds, buckets = vise::Buckets::LATENCIES)] - pub(super) store_next_block_latency: vise::Histogram, + pub(super) queue_next_block_latency: vise::Histogram, } #[vise::register] diff --git a/node/libs/storage/src/block_store/mod.rs b/node/libs/storage/src/block_store/mod.rs index e769aecf..c906e607 100644 --- a/node/libs/storage/src/block_store/mod.rs +++ b/node/libs/storage/src/block_store/mod.rs @@ -1,7 +1,7 @@ //! Defines storage layer for finalized blocks. 
 use anyhow::Context as _;
 use std::{collections::VecDeque, fmt, sync::Arc};
-use zksync_concurrency::{ctx, error::Wrap as _, sync};
+use zksync_concurrency::{ctx, error::Wrap as _, scope, sync};
 use zksync_consensus_roles::validator;

 mod metrics;
@@ -63,38 +63,73 @@ pub trait PersistentBlockStore: 'static + fmt::Debug + Send + Sync {
     /// Consensus code calls this method only once.
     async fn genesis(&self, ctx: &ctx::Ctx) -> ctx::Result;

-    /// Range of blocks available in storage.
-    /// Consensus code calls this method only once and then tracks the
-    /// range of available blocks internally.
-    async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result;
+    /// Range of blocks persisted in storage.
+    fn persisted(&self) -> sync::watch::Receiver;

     /// Gets a block by its number.
+    /// All the blocks from the `persisted()` range are expected to be available.
+    /// Blocks that have been queued but haven't been persisted yet don't have to be available.
     /// Returns error if block is missing.
-    /// Caller is expected to know the state (by calling `state()`)
-    /// and only request the blocks contained in the state.
     async fn block(
         &self,
         ctx: &ctx::Ctx,
         number: validator::BlockNumber,
     ) -> ctx::Result;

-    /// Persistently store a block.
-    /// Implementations are only required to accept a block directly after the current last block,
-    /// so that the stored blocks always constitute a continuous range.
-    /// Implementation should return only after the block is stored PERSISTENTLY -
-    /// consensus liveness property depends on this behavior.
-    async fn store_next_block(
+    /// Queue the block to be persisted in storage.
+    /// `queue_next_block()` may return BEFORE the block is actually persisted,
+    /// but if the call succeeded the block is expected to be persisted eventually.
+    /// Implementations are only required to accept a block directly after the previous queued
+    /// block, starting with `persisted().borrow().next()`.
+ async fn queue_next_block( &self, ctx: &ctx::Ctx, - block: &validator::FinalBlock, + block: validator::FinalBlock, ) -> ctx::Result<()>; } #[derive(Debug)] struct Inner { - queued_state: sync::watch::Sender, - persisted_state: BlockStoreState, - queue: VecDeque, + queued: BlockStoreState, + persisted: BlockStoreState, + cache: VecDeque, +} + +impl Inner { + /// Minimal number of most recent blocks to keep in memory. + /// It allows to serve the recent blocks to peers fast, even + /// if persistent storage reads are slow (like in RocksDB). + /// `BlockStore` may keep in memory more blocks in case + /// blocks are queued faster than they are persisted. + const CACHE_CAPACITY: usize = 100; + + /// Tries to push the next block to cache. + /// Noop if provided block is not the expected one. + /// Returns true iff cache has been modified. + fn try_push(&mut self, block: validator::FinalBlock) -> bool { + if self.queued.next() != block.number() { + return false; + } + self.queued.last = Some(block.justification.clone()); + self.cache.push_back(block); + self.truncate_cache(); + true + } + + fn truncate_cache(&mut self) { + while self.cache.len() > Self::CACHE_CAPACITY + && self.persisted.contains(self.cache[0].number()) + { + self.cache.pop_front(); + } + } + + fn block(&self, n: validator::BlockNumber) -> Option { + // Subtraction is safe, because blocks in cache are + // stored in increasing order of block number. 
+ let first = self.cache.front()?; + self.cache.get((n.0 - first.number().0) as usize).cloned() + } } /// A wrapper around a PersistentBlockStore which adds caching blocks in-memory @@ -118,33 +153,37 @@ impl BlockStoreRunner { let store_ref = Arc::downgrade(&self.0); let _ = COLLECTOR.before_scrape(move || Some(store_ref.upgrade()?.scrape_metrics())); - let res = async { + let res = scope::run!(ctx, |ctx, s| async { + let persisted = self.0.persistent.persisted(); + let mut queue_next = persisted.borrow().next(); + // Task truncating cache whenever a block gets persisted. + s.spawn::<()>(async { + let mut persisted = persisted; + loop { + let persisted = sync::changed(ctx, &mut persisted).await?.clone(); + self.0.inner.send_modify(|inner| { + inner.persisted = persisted; + inner.truncate_cache(); + }); + } + }); + // Task queueing blocks to be persisted. let inner = &mut self.0.inner.subscribe(); loop { - let block = sync::wait_for(ctx, inner, |inner| !inner.queue.is_empty()) + let block = sync::wait_for(ctx, inner, |inner| inner.queued.contains(queue_next)) .await? - .queue[0] - .clone(); + .block(queue_next) + .unwrap(); + queue_next = queue_next.next(); // TODO: monitor errors as well. 
let t = metrics::PERSISTENT_BLOCK_STORE - .store_next_block_latency + .queue_next_block_latency .start(); - self.0.persistent.store_next_block(ctx, &block).await?; + self.0.persistent.queue_next_block(ctx, block).await?; t.observe(); - tracing::info!( - "stored block #{}: {:#?}", - block.header().number, - block.header().payload - ); - - self.0.inner.send_modify(|inner| { - debug_assert_eq!(inner.persisted_state.next(), block.number()); - inner.persisted_state.last = Some(block.justification.clone()); - inner.queue.pop_front(); - }); } - } + }) .await; match res { Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), @@ -165,15 +204,13 @@ impl BlockStore { let t = metrics::PERSISTENT_BLOCK_STORE.genesis_latency.start(); let genesis = persistent.genesis(ctx).await.wrap("persistent.genesis()")?; t.observe(); - let t = metrics::PERSISTENT_BLOCK_STORE.state_latency.start(); - let state = persistent.state(ctx).await.wrap("persistent.state()")?; - t.observe(); - state.verify(&genesis).context("state.verify()")?; + let persisted = persistent.persisted().borrow().clone(); + persisted.verify(&genesis).context("state.verify()")?; let this = Arc::new(Self { inner: sync::watch::channel(Inner { - queued_state: sync::watch::channel(state.clone()).0, - persisted_state: state, - queue: VecDeque::new(), + queued: persisted.clone(), + persisted, + cache: VecDeque::new(), }) .0, genesis, @@ -187,6 +224,11 @@ impl BlockStore { &self.genesis } + /// Available blocks (in memory & persisted). + pub fn queued(&self) -> BlockStoreState { + self.inner.borrow().queued.clone() + } + /// Fetches a block (from queue or persistent storage). 
pub async fn block( &self, @@ -195,14 +237,11 @@ impl BlockStore { ) -> ctx::Result> { { let inner = self.inner.borrow(); - if !inner.queued_state.borrow().contains(number) { + if !inner.queued.contains(number) { return Ok(None); } - if !inner.persisted_state.contains(number) { - // Subtraction is safe, because we know that the block - // is in inner.queue at this point. - let idx = number.0 - inner.persisted_state.next().0; - return Ok(inner.queue.get(idx as usize).cloned()); + if let Some(block) = inner.block(number) { + return Ok(Some(block)); } } let t = metrics::PERSISTENT_BLOCK_STORE.block_latency.start(); @@ -215,7 +254,7 @@ impl BlockStore { Ok(Some(block)) } - /// Insert block to a queue to be persisted eventually. + /// Append block to a queue to be persisted eventually. /// Since persisting a block may take a significant amount of time, /// BlockStore contains a queue of blocks waiting to be persisted. /// `queue_block()` adds a block to the queue as soon as all intermediate @@ -226,74 +265,51 @@ impl BlockStore { ctx: &ctx::Ctx, block: validator::FinalBlock, ) -> ctx::Result<()> { - let number = block.number(); - { - let sub = &mut self.subscribe(); - let queued_state = - sync::wait_for(ctx, sub, |queued_state| queued_state.next() >= number).await?; - if queued_state.next() > number { - return Ok(()); - } - block.verify(&self.genesis).context("block.verify()")?; - } - self.inner.send_if_modified(|inner| { - let modified = inner.queued_state.send_if_modified(|queued_state| { - // It may happen that the same block is queued_state by 2 calls. 
- if queued_state.next() != number { - return false; - } - queued_state.last = Some(block.justification.clone()); - true - }); - if !modified { - return false; - } - inner.queue.push_back(block); - true - }); + block.verify(&self.genesis).context("block.verify()")?; + sync::wait_for(ctx, &mut self.inner.subscribe(), |inner| { + inner.queued.next() >= block.number() + }) + .await?; + self.inner.send_if_modified(|inner| inner.try_push(block)); Ok(()) } - /// Waits until the given block is queued to be stored. - /// If `number < state.first` then it immetiately returns `Ok(())`. + /// Waits until the given block is queued (in memory, or persisted). + /// Note that it doesn't mean that the block is actually available, as old blocks might get pruned. pub async fn wait_until_queued( &self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::OrCanceled<()> { - sync::wait_for(ctx, &mut self.subscribe(), |queued_state| { - number < queued_state.next() + ) -> ctx::OrCanceled { + Ok(sync::wait_for(ctx, &mut self.inner.subscribe(), |inner| { + number < inner.queued.next() }) - .await?; - Ok(()) + .await? + .queued + .clone()) } /// Waits until the given block is stored persistently. - /// If `number < state.first` then it immetiately returns `Ok(())`. + /// Note that it doesn't mean that the block is actually available, as old blocks might get pruned. pub async fn wait_until_persisted( &self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::OrCanceled<()> { - sync::wait_for(ctx, &mut self.inner.subscribe(), |inner| { - number < inner.persisted_state.next() - }) - .await?; - Ok(()) - } - - /// Subscribes to the `BlockStoreState` changes. - /// Note that this state includes both queue AND stored blocks. - pub fn subscribe(&self) -> sync::watch::Receiver { - self.inner.borrow().queued_state.subscribe() + ) -> ctx::OrCanceled { + Ok( + sync::wait_for(ctx, &mut self.persistent.persisted(), |persisted| { + number < persisted.next() + }) + .await? 
+ .clone(), + ) } fn scrape_metrics(&self) -> metrics::BlockStore { let m = metrics::BlockStore::default(); let inner = self.inner.borrow(); - m.next_queued_block - .set(inner.queued_state.borrow().next().0); - m.next_persisted_block.set(inner.persisted_state.next().0); + m.next_queued_block.set(inner.queued.next().0); + m.next_persisted_block.set(inner.persisted.next().0); m } } diff --git a/node/libs/storage/src/testonly/in_memory.rs b/node/libs/storage/src/testonly/in_memory.rs index 1fd7398c..45c3492e 100644 --- a/node/libs/storage/src/testonly/in_memory.rs +++ b/node/libs/storage/src/testonly/in_memory.rs @@ -5,13 +5,13 @@ use std::{ collections::VecDeque, sync::{Arc, Mutex}, }; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, sync}; use zksync_consensus_roles::validator; #[derive(Debug)] struct BlockStoreInner { - first: validator::BlockNumber, genesis: validator::Genesis, + persisted: sync::watch::Sender, blocks: Mutex>, } @@ -28,8 +28,8 @@ impl BlockStore { pub fn new(genesis: validator::Genesis, first: validator::BlockNumber) -> Self { assert!(genesis.fork.first_block <= first); Self(Arc::new(BlockStoreInner { - first, genesis, + persisted: sync::watch::channel(BlockStoreState { first, last: None }).0, blocks: Mutex::default(), })) } @@ -41,17 +41,8 @@ impl PersistentBlockStore for BlockStore { Ok(self.0.genesis.clone()) } - async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result { - Ok(BlockStoreState { - first: self.0.first, - last: self - .0 - .blocks - .lock() - .unwrap() - .back() - .map(|b| b.justification.clone()), - }) + fn persisted(&self) -> sync::watch::Receiver { + self.0.persisted.subscribe() } async fn block( @@ -68,21 +59,20 @@ impl PersistentBlockStore for BlockStore { Ok(blocks.get(idx as usize).context("not found")?.clone()) } - async fn store_next_block( + async fn queue_next_block( &self, _ctx: &ctx::Ctx, - block: &validator::FinalBlock, + block: validator::FinalBlock, ) -> ctx::Result<()> { let mut blocks = 
self.0.blocks.lock().unwrap(); - let got = block.header().number; - let want = match blocks.back() { - Some(last) => last.header().number.next(), - None => self.0.first, - }; - if got != want { - return Err(anyhow::anyhow!("got block {got:?}, while expected {want:?}").into()); + let want = self.0.persisted.borrow().next(); + if block.number() != want { + return Err(anyhow::anyhow!("got block {:?}, want {want:?}", block.number()).into()); } - blocks.push_back(block.clone()); + self.0 + .persisted + .send_modify(|p| p.last = Some(block.justification.clone())); + blocks.push_back(block); Ok(()) } } diff --git a/node/libs/storage/src/testonly/mod.rs b/node/libs/storage/src/testonly/mod.rs index f0d66052..3e8fe9ba 100644 --- a/node/libs/storage/src/testonly/mod.rs +++ b/node/libs/storage/src/testonly/mod.rs @@ -55,7 +55,7 @@ pub async fn new_store_with_first( /// Dumps all the blocks stored in `store`. pub async fn dump(ctx: &ctx::Ctx, store: &dyn PersistentBlockStore) -> Vec { let genesis = store.genesis(ctx).await.unwrap(); - let state = store.state(ctx).await.unwrap(); + let state = store.persisted().borrow().clone(); assert!(genesis.fork.first_block <= state.first); let mut blocks = vec![]; let after = state @@ -77,7 +77,7 @@ pub async fn dump(ctx: &ctx::Ctx, store: &dyn PersistentBlockStore) -> Vec anyhow::Result<()> { - let range = store.subscribe().borrow().clone(); + let range = store.queued(); for n in (range.first.0..range.next().0).map(validator::BlockNumber) { async { store diff --git a/node/libs/storage/src/tests.rs b/node/libs/storage/src/tests.rs index 221be6f7..3fdfa5be 100644 --- a/node/libs/storage/src/tests.rs +++ b/node/libs/storage/src/tests.rs @@ -1,10 +1,11 @@ use super::*; use crate::{testonly::new_store_with_first, ReplicaState}; -use zksync_concurrency::{ctx, scope, testonly::abort_on_panic}; +use zksync_concurrency::{ctx, scope, sync, testonly::abort_on_panic}; use zksync_consensus_roles::validator::testonly::Setup; #[tokio::test] async fn 
test_inmemory_block_store() { + abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let mut setup = Setup::new(rng, 3); @@ -16,7 +17,10 @@ async fn test_inmemory_block_store() { ); let mut want = vec![]; for block in &setup.blocks { - store.store_next_block(ctx, block).await.unwrap(); + store.queue_next_block(ctx, block.clone()).await.unwrap(); + sync::wait_for(ctx, &mut store.persisted(), |p| p.contains(block.number())) + .await + .unwrap(); want.push(block.clone()); assert_eq!(want, testonly::dump(ctx, store).await); } @@ -41,7 +45,6 @@ async fn test_state_updates() { let (store, runner) = new_store_with_first(ctx, &setup.genesis, first_block.number()).await; scope::run!(ctx, |ctx, s| async { s.spawn_bg(runner.run(ctx)); - let sub = &mut store.subscribe(); let want = BlockStoreState { first: first_block.number(), last: None, @@ -55,7 +58,7 @@ async fn test_state_updates() { ] { store.wait_until_queued(ctx, n).await.unwrap(); store.wait_until_persisted(ctx, n).await.unwrap(); - assert_eq!(want, *sub.borrow()); + assert_eq!(want, store.queued()); } for block in &setup.blocks { @@ -67,7 +70,7 @@ async fn test_state_updates() { .wait_until_persisted(ctx, block.number()) .await .unwrap(); - assert_eq!(want, *sub.borrow()); + assert_eq!(want, store.queued()); } else { // Otherwise the state should be updated as soon as block is queued. assert_eq!( @@ -75,7 +78,7 @@ async fn test_state_updates() { first: first_block.number(), last: Some(block.justification.clone()), }, - *sub.borrow() + store.queued() ); } } diff --git a/node/tools/src/rpc/methods/last_committed_block.rs b/node/tools/src/rpc/methods/last_committed_block.rs index 890607bb..658c1f82 100644 --- a/node/tools/src/rpc/methods/last_committed_block.rs +++ b/node/tools/src/rpc/methods/last_committed_block.rs @@ -9,8 +9,7 @@ use zksync_consensus_storage::BlockStore; /// Last view response for /last_view endpoint. 
pub fn callback(node_storage: Arc) -> RpcResult { - let sub = &mut node_storage.subscribe(); - let state = sub.borrow().clone(); + let state = node_storage.queued(); let last_committed_block_header = state .last .context("Failed to get last state") diff --git a/node/tools/src/rpc/methods/last_view.rs b/node/tools/src/rpc/methods/last_view.rs index 162ca902..e0255273 100644 --- a/node/tools/src/rpc/methods/last_view.rs +++ b/node/tools/src/rpc/methods/last_view.rs @@ -9,8 +9,7 @@ use zksync_consensus_storage::BlockStore; /// Last view response for /last_view endpoint. pub fn callback(node_storage: Arc) -> RpcResult { - let sub = &mut node_storage.subscribe(); - let state = sub.borrow().clone(); + let state = node_storage.queued(); let last_view = state .last .context("Failed to get last state") @@ -18,6 +17,8 @@ pub fn callback(node_storage: Arc) -> RpcResult { .view() .number .0; + // TODO(gprusak): this is the view of the last finalized block, not the current view of the + // replica. Fix this. 
Ok(serde_json::json!({ "last_view": last_view })) diff --git a/node/tools/src/store.rs b/node/tools/src/store.rs index af581000..27f9bd6f 100644 --- a/node/tools/src/store.rs +++ b/node/tools/src/store.rs @@ -6,7 +6,7 @@ use std::{ path::Path, sync::{Arc, RwLock}, }; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, sync}; use zksync_consensus_roles::validator; use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; @@ -49,6 +49,7 @@ impl DatabaseKey { struct Inner { genesis: validator::Genesis, + persisted: sync::watch::Sender, db: RwLock, } @@ -67,19 +68,24 @@ impl RocksDB { let mut options = rocksdb::Options::default(); options.create_missing_column_families(true); options.create_if_missing(true); + let db = scope::wait_blocking(|| { + rocksdb::DB::open(&options, path).context("Failed opening RocksDB") + }) + .await?; + Ok(Self(Arc::new(Inner { + persisted: sync::watch::channel(BlockStoreState { + // `RocksDB` is assumed to store all blocks starting from genesis. + first: genesis.fork.first_block, + last: scope::wait_blocking(|| Self::last_blocking(&db)).await?, + }) + .0, genesis, - db: RwLock::new( - scope::wait_blocking(|| { - rocksdb::DB::open(&options, path).context("Failed opening RocksDB") - }) - .await?, - ), + db: RwLock::new(db), }))) } - fn last_blocking(&self) -> anyhow::Result> { - let db = self.0.db.read().unwrap(); + fn last_blocking(db: &rocksdb::DB) -> anyhow::Result> { let mut options = ReadOptions::default(); options.set_iterate_range(DatabaseKey::BLOCKS_START_KEY..); let Some(res) = db @@ -107,12 +113,8 @@ impl PersistentBlockStore for RocksDB { Ok(self.0.genesis.clone()) } - async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result { - Ok(BlockStoreState { - // `RocksDB` is assumed to store all blocks starting from genesis. 
- first: self.0.genesis.fork.first_block, - last: scope::wait_blocking(|| self.last_blocking()).await?, - }) + fn persisted(&self) -> sync::watch::Receiver { + self.0.persisted.subscribe() } async fn block( @@ -133,26 +135,36 @@ impl PersistentBlockStore for RocksDB { } #[tracing::instrument(level = "debug", skip(self))] - async fn store_next_block( + async fn queue_next_block( &self, _ctx: &ctx::Ctx, - block: &validator::FinalBlock, + block: validator::FinalBlock, ) -> ctx::Result<()> { scope::wait_blocking(|| { let db = self.0.db.write().unwrap(); + let want = self.0.persisted.borrow().next(); + anyhow::ensure!( + block.number() == want, + "got {:?} want {want:?}", + block.number() + ); let block_number = block.header().number; let mut write_batch = rocksdb::WriteBatch::default(); write_batch.put( DatabaseKey::Block(block_number).encode_key(), - zksync_protobuf::encode(block), + zksync_protobuf::encode(&block), ); // Commit the transaction. db.write(write_batch) .context("Failed writing block to database")?; + self.0 + .persisted + .send_modify(|p| p.last = Some(block.justification.clone())); Ok(()) }) .await - .wrap(block.header().number) + .context(block.header().number)?; + Ok(()) } } diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index f47a9994..0d9ab857 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -1,7 +1,7 @@ use crate::{store, AppConfig}; use rand::{distributions::Distribution, Rng}; use tempfile::TempDir; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, sync}; use zksync_consensus_roles::validator::testonly::Setup; use zksync_consensus_storage::{testonly, PersistentBlockStore}; use zksync_consensus_utils::EncodeDist; @@ -49,7 +49,10 @@ async fn test_reopen_rocksdb() { let store = store::RocksDB::open(setup.genesis.clone(), dir.path()) .await .unwrap(); - store.store_next_block(ctx, b).await.unwrap(); + store.queue_next_block(ctx, b.clone()).await.unwrap(); + sync::wait_for(ctx, &mut store.persisted(), 
|p| p.contains(b.number())) + .await + .unwrap(); want.push(b.clone()); assert_eq!(want, testonly::dump(ctx, &store).await); } From d7069f7b971d9bb2c8f8fff0ecd2541970960b75 Mon Sep 17 00:00:00 2001 From: Nacho Avecilla Date: Fri, 19 Apr 2024 17:17:10 -0300 Subject: [PATCH 20/79] New attester role for full nodes (#93) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR introduces a new role called `attester`. ## Why ❔ This role will handle various keys and messages associated with L1 batches. In the future, this will enable the use of a new signature scheme specifically for L1 batch signatures, distinguishing them from the block signatures of validators. --------- Co-authored-by: Ignacio Avecilla --- node/actors/bft/src/testonly/ut_harness.rs | 4 +- node/actors/network/src/consensus/mod.rs | 15 +- node/actors/network/src/consensus/tests.rs | 18 +- node/actors/network/src/gossip/tests.rs | 4 +- node/actors/network/src/io.rs | 7 +- node/actors/network/src/proto/signature.proto | 4 +- node/actors/network/src/rpc/signature.rs | 4 +- node/actors/network/src/testonly.rs | 2 +- node/libs/roles/src/attester/conv.rs | 84 +++++++ .../src/attester/keys/aggregate_signature.rs | 72 ++++++ node/libs/roles/src/attester/keys/mod.rs | 11 + .../roles/src/attester/keys/public_key.rs | 33 +++ .../roles/src/attester/keys/secret_key.rs | 74 ++++++ .../libs/roles/src/attester/keys/signature.rs | 56 +++++ .../messages/l1_batch.rs | 59 +++-- node/libs/roles/src/attester/messages/mod.rs | 6 + node/libs/roles/src/attester/messages/msg.rs | 234 ++++++++++++++++++ node/libs/roles/src/attester/mod.rs | 12 + node/libs/roles/src/attester/testonly.rs | 71 ++++++ node/libs/roles/src/attester/tests.rs | 137 ++++++++++ node/libs/roles/src/lib.rs | 3 + node/libs/roles/src/proto/attester.proto | 27 ++ node/libs/roles/src/proto/validator.proto | 8 +- node/libs/roles/src/validator/conv.rs | 40 +-- .../roles/src/validator/messages/consensus.rs | 5 +- 
node/libs/roles/src/validator/messages/mod.rs | 2 - node/libs/roles/src/validator/messages/msg.rs | 16 +- .../roles/src/validator/messages/tests.rs | 30 ++- node/libs/roles/src/validator/testonly.rs | 25 +- node/libs/roles/src/validator/tests.rs | 16 +- node/tools/src/bin/deployer.rs | 2 +- node/tools/src/bin/localnet_config.rs | 2 +- 32 files changed, 977 insertions(+), 106 deletions(-) create mode 100644 node/libs/roles/src/attester/conv.rs create mode 100644 node/libs/roles/src/attester/keys/aggregate_signature.rs create mode 100644 node/libs/roles/src/attester/keys/mod.rs create mode 100644 node/libs/roles/src/attester/keys/public_key.rs create mode 100644 node/libs/roles/src/attester/keys/secret_key.rs create mode 100644 node/libs/roles/src/attester/keys/signature.rs rename node/libs/roles/src/{validator => attester}/messages/l1_batch.rs (53%) create mode 100644 node/libs/roles/src/attester/messages/mod.rs create mode 100644 node/libs/roles/src/attester/messages/msg.rs create mode 100644 node/libs/roles/src/attester/mod.rs create mode 100644 node/libs/roles/src/attester/testonly.rs create mode 100644 node/libs/roles/src/attester/tests.rs create mode 100644 node/libs/roles/src/proto/attester.proto diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index a99dae9f..c61f0cce 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -60,7 +60,7 @@ impl UTHarness { let (send, recv) = ctx::channel::unbounded(); let cfg = Arc::new(Config { - secret_key: setup.keys[0].clone(), + secret_key: setup.validator_keys[0].clone(), block_store: block_store.clone(), replica_store: Box::new(in_memory::ReplicaStore::default()), payload_manager, @@ -74,7 +74,7 @@ impl UTHarness { leader, replica, pipe: recv, - keys: setup.keys.clone(), + keys: setup.validator_keys.clone(), }; let _: Signed = this.try_recv().unwrap(); (this, runner) diff --git 
a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index f89ee422..996f1bde 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -8,7 +8,8 @@ use rand::seq::SliceRandom; use std::{collections::HashSet, sync::Arc}; use tracing::Instrument as _; use zksync_concurrency::{ctx, oneshot, scope, sync, time}; -use zksync_consensus_roles::validator::{self, L1BatchQC}; +use zksync_consensus_roles::attester::{self, L1Batch, L1BatchQC}; +use zksync_consensus_roles::validator::{self}; use zksync_protobuf::kB; mod handshake; @@ -43,7 +44,7 @@ pub(crate) struct Network { /// Set of the currently open outbound connections. pub(crate) outbound: PoolWatch>, /// L1 batch QC. - pub(crate) l1_batch_qc: L1BatchQC, + pub(crate) l1_batch_qc: Option, } #[async_trait::async_trait] @@ -79,8 +80,10 @@ impl rpc::Handler for &L1BatchServer<'_> { async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::signature::Req) -> anyhow::Result<()> { let genesis = self.0.gossip.genesis(); - self.0.l1_batch_qc.verify(genesis).unwrap(); - self.0.l1_batch_qc.clone().add(&req.0, genesis); + // FIXME Remove unwrap and find a way to handle the QC. 
+ let qc = self.0.l1_batch_qc.as_ref().context("no L1BatchQC")?; + qc.verify(genesis).unwrap(); + qc.clone().add(&req.0, genesis); return Ok(()); } } @@ -94,7 +97,7 @@ impl Network { key, inbound: PoolWatch::new(validators.clone(), 0), outbound: PoolWatch::new(validators.clone(), 0), - l1_batch_qc: L1BatchQC::new(validators.len()), + l1_batch_qc: None, gossip, })) } @@ -129,7 +132,7 @@ impl Network { pub(crate) async fn broadcast_signature( &self, ctx: &ctx::Ctx, - signature: validator::Signed, + signature: attester::SignedBatchMsg, ) -> anyhow::Result<()> { let req = rpc::signature::Req(signature); let outbound = self.outbound.current(); diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index 2bb78550..6ff1262d 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -85,11 +85,13 @@ async fn test_genesis_mismatch() { .validator_addrs .update( &setup.genesis.validators, - &[Arc::new(setup.keys[1].sign_msg(validator::NetAddress { - addr: *cfgs[1].server_addr, - version: 0, - timestamp: ctx.now_utc(), - }))], + &[Arc::new(setup.validator_keys[1].sign_msg( + validator::NetAddress { + addr: *cfgs[1].server_addr, + version: 0, + timestamp: ctx.now_utc(), + }, + ))], ) .await .unwrap(); @@ -103,7 +105,7 @@ async fn test_genesis_mismatch() { .context("preface::accept()")?; assert_eq!(endpoint, preface::Endpoint::ConsensusNet); tracing::info!("Expect the handshake to fail"); - let res = handshake::inbound(ctx, &setup.keys[1], rng.gen(), &mut stream).await; + let res = handshake::inbound(ctx, &setup.validator_keys[1], rng.gen(), &mut stream).await; assert_matches!(res, Err(handshake::Error::GenesisMismatch)); tracing::info!("Try to connect to a node with a mismatching genesis."); @@ -113,10 +115,10 @@ async fn test_genesis_mismatch() { .context("preface::connect")?; let res = handshake::outbound( ctx, - &setup.keys[1], + &setup.validator_keys[1], rng.gen(), &mut stream, - 
&setup.keys[0].public(), + &setup.validator_keys[0].public(), ) .await; tracing::info!( diff --git a/node/actors/network/src/gossip/tests.rs b/node/actors/network/src/gossip/tests.rs index b897e0a6..11ed9570 100644 --- a/node/actors/network/src/gossip/tests.rs +++ b/node/actors/network/src/gossip/tests.rs @@ -576,7 +576,9 @@ async fn validator_node_restart() { let sub = &mut node1.net.gossip.validator_addrs.subscribe(); let want = Some(*cfgs[0].server_addr); sync::wait_for(ctx, sub, |got| { - got.get(&setup.keys[0].public()).map(|x| x.msg.addr) == want + got.get(&setup.validator_keys[0].public()) + .map(|x| x.msg.addr) + == want }) .await?; Ok(()) diff --git a/node/actors/network/src/io.rs b/node/actors/network/src/io.rs index b365118b..af77093d 100644 --- a/node/actors/network/src/io.rs +++ b/node/actors/network/src/io.rs @@ -1,6 +1,9 @@ #![allow(missing_docs)] use zksync_concurrency::oneshot; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::{ + attester::{self, L1Batch}, + node, validator, +}; use zksync_consensus_storage::BlockStoreState; /// All the messages that other actors can send to the Network actor. @@ -16,7 +19,7 @@ pub enum InputMessage { #[derive(Debug)] pub struct L1BatchInputMessage { - pub message: validator::Signed, + pub message: attester::SignedBatchMsg, } /// Message types from the Consensus actor. 
diff --git a/node/actors/network/src/proto/signature.proto b/node/actors/network/src/proto/signature.proto index a0b53625..fcf6bd7d 100644 --- a/node/actors/network/src/proto/signature.proto +++ b/node/actors/network/src/proto/signature.proto @@ -2,11 +2,11 @@ syntax = "proto3"; package zksync.network.consensus; -import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; import "zksync/std.proto"; message SignatureReq { - optional roles.validator.Signed msg = 1; + optional roles.attester.SignedBatch msg = 1; } message SignatureResp {} diff --git a/node/actors/network/src/rpc/signature.rs b/node/actors/network/src/rpc/signature.rs index 34e2b327..9145b3cc 100644 --- a/node/actors/network/src/rpc/signature.rs +++ b/node/actors/network/src/rpc/signature.rs @@ -1,6 +1,6 @@ //! Defines RPC for passing consensus messages. use crate::{consensus::Network, mux, proto::consensus as proto}; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::attester::{self, L1Batch}; use zksync_protobuf::{read_required, ProtoFmt}; /// Signature RPC. @@ -18,7 +18,7 @@ impl super::Rpc for Rpc { pub(crate) struct L1BatchServer<'a>(pub(crate) &'a Network); /// Signed consensus message that the receiving peer should process. #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct Req(pub(crate) validator::Signed); +pub(crate) struct Req(pub(crate) attester::SignedBatchMsg); /// Confirmation that the signature message has been processed. 
#[derive(Debug, Clone, PartialEq, Eq)] diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index 8632084a..dad1ed94 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -53,7 +53,7 @@ pub fn new_configs( setup: &validator::testonly::Setup, gossip_peers: usize, ) -> Vec { - let configs = setup.keys.iter().map(|key| { + let configs = setup.validator_keys.iter().map(|key| { let addr = net::tcp::testonly::reserve_listener(); Config { server_addr: addr, diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs new file mode 100644 index 00000000..d30002ad --- /dev/null +++ b/node/libs/roles/src/attester/conv.rs @@ -0,0 +1,84 @@ +use crate::proto::attester::{self as proto}; +use anyhow::Context; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_utils::enum_util::Variant; +use zksync_protobuf::{read_required, required, ProtoFmt}; + +use super::{BatchNumber, L1Batch, Msg, PublicKey, Signature, SignedBatchMsg}; + +impl ProtoFmt for L1Batch { + type Proto = proto::L1Batch; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + number: BatchNumber(*required(&r.number).context("number")?), + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + number: Some(self.number.0), + } + } +} + +impl + Clone> ProtoFmt for SignedBatchMsg { + type Proto = proto::SignedBatch; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + msg: V::extract(read_required::(&r.msg).context("msg")?)?, + key: read_required(&r.key).context("key")?, + sig: read_required(&r.sig).context("sig")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + msg: Some(self.msg.clone().insert().build()), + key: Some(self.key.build()), + sig: Some(self.sig.build()), + } + } +} + +impl ProtoFmt for Msg { + type Proto = proto::Msg; + + fn read(r: &Self::Proto) -> anyhow::Result { + use proto::msg::T; + Ok(match r.t.as_ref().context("missing")? 
{ + T::L1Batch(r) => Self::L1Batch(ProtoFmt::read(r).context("L1Batch")?), + }) + } + + fn build(&self) -> Self::Proto { + use proto::msg::T; + + let t = match self { + Self::L1Batch(x) => T::L1Batch(x.build()), + }; + + Self::Proto { t: Some(t) } + } +} + +impl ProtoFmt for PublicKey { + type Proto = proto::PublicKey; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ByteFmt::decode(required(&r.bn254)?)?)) + } + fn build(&self) -> Self::Proto { + Self::Proto { + bn254: Some(self.0.encode()), + } + } +} + +impl ProtoFmt for Signature { + type Proto = proto::Signature; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ByteFmt::decode(required(&r.bn254)?)?)) + } + fn build(&self) -> Self::Proto { + Self::Proto { + bn254: Some(self.0.encode()), + } + } +} diff --git a/node/libs/roles/src/attester/keys/aggregate_signature.rs b/node/libs/roles/src/attester/keys/aggregate_signature.rs new file mode 100644 index 00000000..20eb72ab --- /dev/null +++ b/node/libs/roles/src/attester/keys/aggregate_signature.rs @@ -0,0 +1,72 @@ +use crate::attester::{L1Batch, MsgHash}; + +use super::{PublicKey, Signature}; +use std::fmt; +use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; +use zksync_consensus_utils::enum_util::Variant; + +/// An aggregate signature from an attester. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Default)] +pub struct AggregateSignature(pub(crate) bn254::AggregateSignature); + +impl AggregateSignature { + /// Add a signature to the aggregation. + pub fn add(&mut self, sig: &Signature) { + self.0.add(&sig.0) + } + + /// Verify a list of messages against a list of public keys. + pub(crate) fn verify_messages<'a>( + &self, + messages_and_keys: impl Iterator, + ) -> anyhow::Result<()> { + let hashes_and_keys = + messages_and_keys.map(|(message, key)| (message.insert().hash(), key)); + self.verify_hash(hashes_and_keys) + } + + /// Verify a message hash against a list of public keys. 
+ pub(crate) fn verify_hash<'a>( + &self, + hashes_and_keys: impl Iterator, + ) -> anyhow::Result<()> { + let bytes_and_pks: Vec<_> = hashes_and_keys + .map(|(hash, pk)| (hash.0.as_bytes().to_owned(), &pk.0)) + .collect(); + + let bytes_and_pks = bytes_and_pks.iter().map(|(bytes, pk)| (&bytes[..], *pk)); + + self.0.verify(bytes_and_pks) + } +} + +impl ByteFmt for AggregateSignature { + fn decode(bytes: &[u8]) -> anyhow::Result { + ByteFmt::decode(bytes).map(Self) + } + + fn encode(&self) -> Vec { + ByteFmt::encode(&self.0) + } +} + +impl TextFmt for AggregateSignature { + fn decode(text: Text) -> anyhow::Result { + text.strip("attester:aggregate_signature:bn254:")? + .decode_hex() + .map(Self) + } + + fn encode(&self) -> String { + format!( + "attester:aggregate_signature:bn254:{}", + hex::encode(ByteFmt::encode(&self.0)) + ) + } +} + +impl fmt::Debug for AggregateSignature { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str(&TextFmt::encode(self)) + } +} diff --git a/node/libs/roles/src/attester/keys/mod.rs b/node/libs/roles/src/attester/keys/mod.rs new file mode 100644 index 00000000..5e1d558b --- /dev/null +++ b/node/libs/roles/src/attester/keys/mod.rs @@ -0,0 +1,11 @@ +//! Keys and signatures used by the attester. + +mod aggregate_signature; +mod public_key; +mod secret_key; +mod signature; + +pub use aggregate_signature::AggregateSignature; +pub use public_key::PublicKey; +pub use secret_key::SecretKey; +pub use signature::Signature; diff --git a/node/libs/roles/src/attester/keys/public_key.rs b/node/libs/roles/src/attester/keys/public_key.rs new file mode 100644 index 00000000..9f23d620 --- /dev/null +++ b/node/libs/roles/src/attester/keys/public_key.rs @@ -0,0 +1,33 @@ +use std::fmt; +use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; + +/// A public key for an attester used in L1 batch signing. 
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PublicKey(pub(crate) bn254::PublicKey); + +impl ByteFmt for PublicKey { + fn encode(&self) -> Vec { + ByteFmt::encode(&self.0) + } + fn decode(bytes: &[u8]) -> anyhow::Result { + ByteFmt::decode(bytes).map(Self) + } +} + +impl TextFmt for PublicKey { + fn encode(&self) -> String { + format!( + "attester:public:bn254:{}", + hex::encode(ByteFmt::encode(&self.0)) + ) + } + fn decode(text: Text) -> anyhow::Result { + text.strip("attester:public:bn254:")?.decode_hex().map(Self) + } +} + +impl fmt::Debug for PublicKey { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(&TextFmt::encode(self)) + } +} diff --git a/node/libs/roles/src/attester/keys/secret_key.rs b/node/libs/roles/src/attester/keys/secret_key.rs new file mode 100644 index 00000000..b225521b --- /dev/null +++ b/node/libs/roles/src/attester/keys/secret_key.rs @@ -0,0 +1,74 @@ +use super::{PublicKey, Signature}; +use crate::attester::{L1Batch, Msg, MsgHash, SignedBatchMsg}; +use std::{fmt, sync::Arc}; +use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; +use zksync_consensus_utils::enum_util::Variant; + +/// A secret key for the attester role to sign L1 batches. +/// SecretKey is put into an Arc, so that we can clone it, +/// without copying the secret all over the RAM. +#[derive(Clone, PartialEq)] +pub struct SecretKey(pub(crate) Arc); + +impl SecretKey { + /// Generates a batch secret key from a cryptographically-secure entropy source. + pub fn generate() -> Self { + Self(Arc::new(bn254::SecretKey::generate())) + } + + /// Public key corresponding to this secret key. + pub fn public(&self) -> PublicKey { + PublicKey(self.0.public()) + } + + /// Signs a batch message. 
+    pub fn sign_batch_msg<V>(&self, msg: L1Batch) -> SignedBatchMsg<V>
+    where
+        V: Variant<Msg>,
+    {
+        let msg = msg.insert();
+        SignedBatchMsg {
+            sig: self.sign_hash(&msg.hash()),
+            key: self.public(),
+            msg: V::extract(msg).unwrap(),
+        }
+    }
+
+    /// Sign a message hash.
+    pub fn sign_hash(&self, msg_hash: &MsgHash) -> Signature {
+        Signature(self.0.sign(&ByteFmt::encode(msg_hash)))
+    }
+}
+
+impl ByteFmt for SecretKey {
+    fn encode(&self) -> Vec<u8> {
+        ByteFmt::encode(&*self.0)
+    }
+
+    fn decode(bytes: &[u8]) -> anyhow::Result<Self> {
+        ByteFmt::decode(bytes).map(Arc::new).map(Self)
+    }
+}
+
+impl TextFmt for SecretKey {
+    fn encode(&self) -> String {
+        format!(
+            "attester:secret:bn254:{}",
+            hex::encode(ByteFmt::encode(&*self.0))
+        )
+    }
+
+    fn decode(text: Text) -> anyhow::Result<Self> {
+        text.strip("attester:secret:bn254:")?
+            .decode_hex()
+            .map(Arc::new)
+            .map(Self)
+    }
+}
+
+impl fmt::Debug for SecretKey {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        // The secret itself should never be logged.
+        write!(fmt, "<secret for {}>", TextFmt::encode(&self.public()))
+    }
+}
diff --git a/node/libs/roles/src/attester/keys/signature.rs b/node/libs/roles/src/attester/keys/signature.rs
new file mode 100644
index 00000000..82dfa8f5
--- /dev/null
+++ b/node/libs/roles/src/attester/keys/signature.rs
@@ -0,0 +1,56 @@
+use crate::attester::{Msg, MsgHash};
+
+use super::PublicKey;
+use std::fmt;
+use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt};
+
+/// A signature of an L1 batch from an attester.
+#[derive(Clone, PartialEq, Eq)]
+pub struct Signature(pub(crate) bn254::Signature);
+
+impl Signature {
+    /// Verify a message against a public key.
+    pub fn verify_msg(&self, msg: &Msg, pk: &PublicKey) -> anyhow::Result<()> {
+        self.verify_hash(&msg.hash(), pk)
+    }
+
+    /// Verify a message hash against a public key.
+    pub fn verify_hash(&self, msg_hash: &MsgHash, pk: &PublicKey) -> anyhow::Result<()> {
+        self.0.verify(&ByteFmt::encode(msg_hash), &pk.0)
+    }
+}
+
+impl ByteFmt for Signature {
+    fn encode(&self) -> Vec<u8> {
+        ByteFmt::encode(&self.0)
+    }
+    fn decode(bytes: &[u8]) -> anyhow::Result<Self> {
+        ByteFmt::decode(bytes).map(Self)
+    }
+}
+
+impl TextFmt for Signature {
+    fn encode(&self) -> String {
+        format!(
+            "attester:signature:bn254:{}",
+            hex::encode(ByteFmt::encode(&self.0))
+        )
+    }
+    fn decode(text: Text) -> anyhow::Result<Self> {
+        text.strip("attester:signature:bn254:")?
+            .decode_hex()
+            .map(Self)
+    }
+}
+
+impl fmt::Debug for Signature {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str(&TextFmt::encode(self))
+    }
+}
+
+impl std::hash::Hash for Signature {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        ByteFmt::encode(self).hash(state)
+    }
+}
diff --git a/node/libs/roles/src/validator/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs
similarity index 53%
rename from node/libs/roles/src/validator/messages/l1_batch.rs
rename to node/libs/roles/src/attester/messages/l1_batch.rs
index ae23b1a0..22ef1aa6 100644
--- a/node/libs/roles/src/validator/messages/l1_batch.rs
+++ b/node/libs/roles/src/attester/messages/l1_batch.rs
@@ -1,22 +1,32 @@
-use crate::validator::{self};
+use crate::{
+    attester::{self, AggregateSignature},
+    validator::Genesis,
+};
 
-use super::{Genesis, Signed};
+use super::{SignedBatchMsg, Signers};
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Default)]
+/// A batch number.
+pub struct BatchNumber(pub u64);
 
 /// A message to send by validators to the gossip network.
 /// It contains the validators signature to sign the block batches to be sent to L1.
-#[derive(Debug, Clone, Eq, PartialEq, Default)]
-pub struct L1BatchMsg;
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Default)]
+pub struct L1Batch {
+    /// The number of the batch.
+    pub number: BatchNumber,
+}
 
 /// A certificate for a batch of L2 blocks to be sent to L1.
 /// It contains the signatures of the validators that signed the batch.
 #[derive(Debug, Clone, Eq, PartialEq)]
 pub struct L1BatchQC {
     /// The aggregate signature of the signed L1 batches.
-    pub signature: validator::AggregateSignature,
+    pub signature: AggregateSignature,
     /// The validators that signed this message.
-    pub signers: validator::Signers,
+    pub signers: Signers,
     /// The message that was signed.
-    pub message: L1BatchMsg,
+    pub message: L1Batch,
 }
 
 /// Error returned by `L1BatchQC::verify()` if the signature is invalid.
@@ -25,25 +35,33 @@ pub enum L1BatchQCVerifyError {
     /// Bad signature.
     #[error("bad signature: {0:#}")]
     BadSignature(#[source] anyhow::Error),
+    /// Not enough signers.
+    #[error("not enough signers: got {got}, want {want}")]
+    NotEnoughSigners {
+        /// Got signers.
+        got: usize,
+        /// Want signers.
+        want: usize,
+    },
 }
 
 impl L1BatchQC {
     /// Create a new empty instance for a given `ReplicaCommit` message and a validator set size.
-    pub fn new(validators: usize) -> Self {
+    pub fn new(message: L1Batch, genesis: &Genesis) -> Self {
         Self {
-            signature: validator::AggregateSignature::default(),
-            signers: validator::Signers::new(validators),
-            message: L1BatchMsg,
+            message,
+            signers: Signers::new(genesis.attesters.len()),
+            signature: attester::AggregateSignature::default(),
         }
     }
 
     /// Add a attester's signature.
     /// Signature is assumed to be already verified.
-    pub fn add(&mut self, msg: &Signed<L1BatchMsg>, genesis: &Genesis) {
+    pub fn add(&mut self, msg: &SignedBatchMsg<L1Batch>, genesis: &Genesis) {
         if self.message != msg.msg {
             return;
         };
-        let Some(i) = genesis.validators.index(&msg.key) else {
+        let Some(i) = genesis.attesters.index(&msg.key) else {
             return;
         };
         if self.signers.0[i] {
@@ -53,11 +71,20 @@ impl L1BatchQC {
         self.signature.add(&msg.sig);
     }
 
-    /// Verifies the signature of the CommitQC.
+    /// Verifies the signature of the L1BatchQC.
     pub fn verify(&self, genesis: &Genesis) -> Result<(), L1BatchQCVerifyError> {
-        // Now we can verify the signature.
+        // Verify that we have enough signers.
+        let num_signers = self.signers.count();
+        let threshold = genesis.attesters.threshold();
+        if num_signers < threshold {
+            return Err(L1BatchQCVerifyError::NotEnoughSigners {
+                got: num_signers,
+                want: threshold,
+            });
+        }
+
         let messages_and_keys = genesis
-            .validators
+            .attesters
             .iter()
             .enumerate()
             .filter(|(i, _)| self.signers.0[*i])
diff --git a/node/libs/roles/src/attester/messages/mod.rs b/node/libs/roles/src/attester/messages/mod.rs
new file mode 100644
index 00000000..d8a04914
--- /dev/null
+++ b/node/libs/roles/src/attester/messages/mod.rs
@@ -0,0 +1,6 @@
+//! Attester messages.
+mod l1_batch;
+mod msg;
+
+pub use l1_batch::*;
+pub use msg::*;
diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs
new file mode 100644
index 00000000..241fd630
--- /dev/null
+++ b/node/libs/roles/src/attester/messages/msg.rs
@@ -0,0 +1,234 @@
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    fmt,
+};
+
+use crate::{
+    attester::{L1Batch, PublicKey, Signature},
+    validator::ViewNumber,
+};
+use bit_vec::BitVec;
+use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt};
+use zksync_consensus_utils::enum_util::{BadVariantError, Variant};
+
+/// Message that is sent by an attester.
+pub enum Msg {
+    /// L1 batch message.
+    L1Batch(L1Batch),
+}
+
+impl Msg {
+    /// Returns the hash of the message.
+    pub fn hash(&self) -> MsgHash {
+        MsgHash(keccak256::Keccak256::new(&zksync_protobuf::canonical(self)))
+    }
+}
+
+impl Variant<Msg> for L1Batch {
+    fn insert(self) -> Msg {
+        Msg::L1Batch(self)
+    }
+    fn extract(msg: Msg) -> Result<Self, BadVariantError> {
+        let Msg::L1Batch(this) = msg;
+        Ok(this)
+    }
+}
+
+/// Strongly typed signed l1 batch message.
+/// WARNING: signature is not guaranteed to be valid.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct SignedBatchMsg<V: Variant<Msg>> {
+    /// The message that was signed.
+    pub msg: V,
+    /// The public key of the signer.
+    pub key: PublicKey,
+    /// The signature.
+    pub sig: Signature,
+}
+
+/// Struct that represents a bit map of validators. We use it to compactly store
+/// which validators signed a given message.
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Signers(pub BitVec);
+
+impl Signers {
+    /// Constructs an empty Signers set.
+    pub fn new(n: usize) -> Self {
+        Self(BitVec::from_elem(n, false))
+    }
+
+    /// Returns the number of signers, i.e. the number of validators that signed
+    /// the particular message that this signer bitmap refers to.
+    pub fn count(&self) -> usize {
+        self.0.iter().filter(|b| *b).count()
+    }
+
+    /// Size of the corresponding ValidatorSet.
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    /// Returns true if there are no signers.
+    pub fn is_empty(&self) -> bool {
+        self.0.none()
+    }
+}
+
+/// A struct that represents a set of attesters. It is used to store the current attester set.
+/// We represent each attester by its public key.
+#[derive(Clone, Debug, PartialEq, Eq, Default)]
+pub struct AttesterSet {
+    vec: Vec<PublicKey>,
+    map: BTreeMap<PublicKey, usize>,
+}
+
+impl AttesterSet {
+    /// Creates a new AttesterSet from a list of attester public keys.
+    pub fn new(attesters: impl IntoIterator<Item = PublicKey>) -> anyhow::Result<Self> {
+        let mut set = BTreeSet::new();
+        for attester in attesters {
+            anyhow::ensure!(set.insert(attester), "Duplicate validator in ValidatorSet");
+        }
+        anyhow::ensure!(
+            !set.is_empty(),
+            "ValidatorSet must contain at least one validator"
+        );
+        Ok(Self {
+            vec: set.iter().cloned().collect(),
+            map: set.into_iter().enumerate().map(|(i, pk)| (pk, i)).collect(),
+        })
+    }
+
+    /// Iterates over attesters.
+    pub fn iter(&self) -> impl Iterator<Item = &PublicKey> {
+        self.vec.iter()
+    }
+
+    /// Returns the number of attesters.
+    #[allow(clippy::len_without_is_empty)] // a valid `AttesterSet` is always non-empty by construction
+    pub fn len(&self) -> usize {
+        self.vec.len()
+    }
+
+    /// Returns true if the given attester is in the attester set.
+    pub fn contains(&self, validator: &PublicKey) -> bool {
+        self.map.contains_key(validator)
+    }
+
+    /// Get attester by its index in the set.
+    pub fn get(&self, index: usize) -> Option<&PublicKey> {
+        self.vec.get(index)
+    }
+
+    /// Get the index of an attester in the set.
+    pub fn index(&self, validator: &PublicKey) -> Option<usize> {
+        self.map.get(validator).copied()
+    }
+
+    /// Computes the attester for the given view.
+    pub fn view_leader(&self, view_number: ViewNumber) -> PublicKey {
+        let index = view_number.0 as usize % self.len();
+        self.get(index).unwrap().clone()
+    }
+
+    /// Signature threshold for this attester set.
+    pub fn threshold(&self) -> usize {
+        threshold(self.len())
+    }
+
+    /// Maximal number of faulty replicas allowed in this attester set.
+    pub fn faulty_replicas(&self) -> usize {
+        faulty_replicas(self.len())
+    }
+}
+
+/// Calculate the maximum number of faulty replicas, for a given number of replicas.
+pub fn faulty_replicas(n: usize) -> usize {
+    // Calculate the allowed maximum number of faulty replicas. We want the following relationship to hold:
+    //      n = 5*f + 1
+    // for n total replicas and f faulty replicas. This results in the following formula for the maximum
+    // number of faulty replicas:
+    //      f = floor((n - 1) / 5)
+    // Because of this, it doesn't make sense to have 5*f + 2 or 5*f + 3 replicas. It won't increase the number
+    // of allowed faulty replicas.
+    (n - 1) / 5
+}
+
+/// Calculate the consensus threshold, the minimum number of votes for any consensus action to be valid,
+/// for a given number of replicas.
+pub fn threshold(n: usize) -> usize {
+    n - faulty_replicas(n)
+}
+
+impl std::ops::BitOrAssign<&Self> for Signers {
+    fn bitor_assign(&mut self, other: &Self) {
+        self.0.or(&other.0);
+    }
+}
+
+impl std::ops::BitAndAssign<&Self> for Signers {
+    fn bitand_assign(&mut self, other: &Self) {
+        self.0.and(&other.0);
+    }
+}
+
+impl std::ops::BitAnd for &Signers {
+    type Output = Signers;
+    fn bitand(self, other: Self) -> Signers {
+        let mut this = self.clone();
+        this &= other;
+        this
+    }
+}
+
+/// The hash of a message.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct MsgHash(pub(crate) keccak256::Keccak256);
+
+impl ByteFmt for MsgHash {
+    fn decode(bytes: &[u8]) -> anyhow::Result<Self> {
+        ByteFmt::decode(bytes).map(Self)
+    }
+
+    fn encode(&self) -> Vec<u8> {
+        ByteFmt::encode(&self.0)
+    }
+}
+
+impl TextFmt for MsgHash {
+    fn decode(text: Text) -> anyhow::Result<Self> {
+        text.strip("attester_msg:keccak256:")?
+            .decode_hex()
+            .map(Self)
+    }
+
+    fn encode(&self) -> String {
+        format!(
+            "attester_msg:keccak256:{}",
+            hex::encode(ByteFmt::encode(&self.0))
+        )
+    }
+}
+
+impl fmt::Debug for MsgHash {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str(&TextFmt::encode(self))
+    }
+}
+
+impl<V: Variant<Msg> + Clone> SignedBatchMsg<V> {
+    /// Verify the signature on the message.
+    pub fn verify(&self) -> anyhow::Result<()> {
+        self.sig.verify_msg(&self.msg.clone().insert(), &self.key)
+    }
+
+    /// Casts a signed message variant to sub/super variant.
+    /// It is an equivalent of constructing/deconstructing enum values.
+    pub fn cast(self) -> Result<SignedBatchMsg<V>, BadVariantError> {
+        Ok(SignedBatchMsg {
+            msg: V::extract(self.msg.insert())?,
+            key: self.key,
+            sig: self.sig,
+        })
+    }
+}
diff --git a/node/libs/roles/src/attester/mod.rs b/node/libs/roles/src/attester/mod.rs
new file mode 100644
index 00000000..5bc5b466
--- /dev/null
+++ b/node/libs/roles/src/attester/mod.rs
@@ -0,0 +1,12 @@
+//! Attester role implementation.
+
+#[cfg(test)]
+mod tests;
+
+mod conv;
+mod keys;
+mod messages;
+mod testonly;
+
+pub use self::keys::*;
+pub use self::messages::*;
diff --git a/node/libs/roles/src/attester/testonly.rs b/node/libs/roles/src/attester/testonly.rs
new file mode 100644
index 00000000..362c78fe
--- /dev/null
+++ b/node/libs/roles/src/attester/testonly.rs
@@ -0,0 +1,71 @@
+use super::{
+    AggregateSignature, AttesterSet, L1Batch, Msg, MsgHash, PublicKey, SecretKey, Signature,
+    SignedBatchMsg,
+};
+use rand::{
+    distributions::{Distribution, Standard},
+    Rng,
+};
+use std::sync::Arc;
+use zksync_consensus_utils::enum_util::Variant;
+
+impl AggregateSignature {
+    /// Generate a new aggregate signature from a list of signatures.
+    pub fn aggregate<'a>(sigs: impl IntoIterator<Item = &'a Signature>) -> Self {
+        let mut agg = Self::default();
+        for sig in sigs {
+            agg.add(sig);
+        }
+        agg
+    }
+}
+
+impl Distribution<SecretKey> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SecretKey {
+        SecretKey(Arc::new(rng.gen()))
+    }
+}
+
+impl Distribution<PublicKey> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> PublicKey {
+        PublicKey(rng.gen())
+    }
+}
+
+impl Distribution<AggregateSignature> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AggregateSignature {
+        AggregateSignature(rng.gen())
+    }
+}
+
+impl Distribution<AttesterSet> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AttesterSet {
+        let count = rng.gen_range(1..11);
+        let public_keys = (0..count).map(|_| rng.gen());
+        AttesterSet::new(public_keys).unwrap()
+    }
+}
+
+impl Distribution<L1Batch> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> L1Batch {
+        L1Batch::default()
+    }
+}
+
+impl Distribution<Signature> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Signature {
+        Signature(rng.gen())
+    }
+}
+
+impl<V: Variant<Msg>> Distribution<SignedBatchMsg<V>> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SignedBatchMsg<V> {
+        rng.gen::<SecretKey>().sign_batch_msg(rng.gen())
+    }
+}
+
+impl Distribution<MsgHash> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MsgHash {
+        MsgHash(rng.gen())
+    }
+}
diff --git a/node/libs/roles/src/attester/tests.rs
b/node/libs/roles/src/attester/tests.rs new file mode 100644 index 00000000..ce769f3b --- /dev/null +++ b/node/libs/roles/src/attester/tests.rs @@ -0,0 +1,137 @@ +use crate::validator::{testonly::Setup, Committee, Genesis}; + +use super::*; +use assert_matches::assert_matches; +use rand::Rng; +use zksync_concurrency::ctx; +use zksync_consensus_crypto::{ByteFmt, Text, TextFmt}; +use zksync_protobuf::testonly::{test_encode, test_encode_random}; + +#[test] +fn test_byte_encoding() { + let key = SecretKey::generate(); + assert_eq!( + key.public(), + ::decode(&ByteFmt::encode(&key)) + .unwrap() + .public() + ); + assert_eq!( + key.public(), + ByteFmt::decode(&ByteFmt::encode(&key.public())).unwrap() + ); +} + +#[test] +fn test_text_encoding() { + let key = SecretKey::generate(); + let t1 = TextFmt::encode(&key); + let t2 = TextFmt::encode(&key.public()); + assert_eq!( + key.public(), + Text::new(&t1).decode::().unwrap().public() + ); + assert_eq!(key.public(), Text::new(&t2).decode().unwrap()); + assert!(Text::new(&t1).decode::().is_err()); + assert!(Text::new(&t2).decode::().is_err()); +} + +#[test] +fn test_schema_encoding() { + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + test_encode_random::>(rng); + let key = rng.gen::().public(); + test_encode(rng, &key); + test_encode_random::(rng); +} + +#[test] +fn test_signature_verify() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let msg1: MsgHash = rng.gen(); + let msg2: MsgHash = rng.gen(); + + let key1: SecretKey = rng.gen(); + let key2: SecretKey = rng.gen(); + + let sig1 = key1.sign_hash(&msg1); + + // Matching key and message. + sig1.verify_hash(&msg1, &key1.public()).unwrap(); + + // Mismatching message. + assert!(sig1.verify_hash(&msg2, &key1.public()).is_err()); + + // Mismatching key. 
+ assert!(sig1.verify_hash(&msg1, &key2.public()).is_err()); +} + +#[test] +fn test_agg_signature_verify() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let msg1: MsgHash = rng.gen(); + let msg2: MsgHash = rng.gen(); + + let key1: SecretKey = rng.gen(); + let key2: SecretKey = rng.gen(); + + let sig1 = key1.sign_hash(&msg1); + let sig2 = key2.sign_hash(&msg2); + + let agg_sig = AggregateSignature::aggregate(vec![&sig1, &sig2]); + + // Matching key and message. + agg_sig + .verify_hash([(msg1, &key1.public()), (msg2, &key2.public())].into_iter()) + .unwrap(); + + // Mismatching message. + assert!(agg_sig + .verify_hash([(msg2, &key1.public()), (msg1, &key2.public())].into_iter()) + .is_err()); + + // Mismatching key. + assert!(agg_sig + .verify_hash([(msg1, &key2.public()), (msg2, &key1.public())].into_iter()) + .is_err()); +} + +#[test] +fn test_l1_batch_qc() { + use L1BatchQCVerifyError as Error; + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let setup1 = Setup::new(rng, 6); + let setup2 = Setup::new(rng, 6); + let genesis3 = Genesis { + version: setup1.genesis.version, + validators: Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + attesters: AttesterSet::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(), + fork: setup1.genesis.fork.clone(), + }; + + for i in 0..setup1.attester_keys.len() + 1 { + let mut qc = L1BatchQC::new(L1Batch::default(), &setup1.genesis); + for key in &setup1.attester_keys[0..i] { + qc.add(&key.sign_batch_msg(qc.message.clone()), &setup1.genesis); + } + if i >= setup1.genesis.attesters.threshold() { + assert!(qc.verify(&setup1.genesis).is_ok()); + } else { + assert_matches!( + qc.verify(&setup1.genesis), + Err(Error::NotEnoughSigners { .. }) + ); + } + + // Mismatching validator sets. 
+ assert!(qc.verify(&setup2.genesis).is_err()); + assert!(qc.verify(&genesis3).is_err()); + } +} diff --git a/node/libs/roles/src/lib.rs b/node/libs/roles/src/lib.rs index 8f836ca7..7a5e9366 100644 --- a/node/libs/roles/src/lib.rs +++ b/node/libs/roles/src/lib.rs @@ -6,7 +6,10 @@ //! - `Validator`: a node that participates in the consensus protocol, so it votes for blocks and produces blocks. //! It also participates in the validator network, which is a mesh network just for validators. Not //! every node has this role. +//! - `Attester`: a node that signs the L1 batches and broadcasts the signatures to the gossip network. +//! Not every node has this role. +pub mod attester; pub mod node; pub mod proto; pub mod validator; diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto new file mode 100644 index 00000000..1e315d29 --- /dev/null +++ b/node/libs/roles/src/proto/attester.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package zksync.roles.attester; + +message L1Batch { + optional uint64 number = 1; // required +} + +message Msg { + oneof t { // required + L1Batch l1_batch = 4; + } +} + +message SignedBatch { + optional Msg msg = 1; // required + optional PublicKey key = 2; // required + optional Signature sig = 3; // required +} + +message PublicKey { + optional bytes bn254 = 1; // required +} + +message Signature { + optional bytes bn254 = 1; // required +} diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 1e8eb43d..65e91bd5 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package zksync.roles.validator; import "zksync/std.proto"; +import "zksync/roles/attester.proto"; message Fork { optional uint64 number = 1; // required; ForkId @@ -14,8 +15,9 @@ message Fork { // a backward compatible change: https://buf.build/docs/breaking/rules#field_same_oneof message Genesis { optional 
Fork fork = 1; // required - repeated PublicKey validators = 2 [deprecated = true]; + repeated PublicKey validators = 2; repeated WeightedValidator validators_v1 = 3; + repeated attester.PublicKey attesters = 4; } message GenesisHash { @@ -141,15 +143,11 @@ message NetAddress { optional std.Timestamp timestamp = 3; // required } -message L1BatchMsg { -} - message Msg { oneof t { // required ConsensusMsg consensus = 1; bytes session_id = 2; NetAddress net_address = 3; - L1BatchMsg l1_batch = 4; } } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 7c848144..2ddef725 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,11 +1,15 @@ +use crate::{ + attester::{self, AttesterSet}, + node::SessionId, +}; + use super::{ AggregateSignature, BlockHeader, BlockNumber, CommitQC, Committee, ConsensusMsg, FinalBlock, - Fork, ForkNumber, Genesis, GenesisHash, GenesisVersion, L1BatchMsg, LeaderCommit, - LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, - ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, View, - ViewNumber, WeightedValidator, + Fork, ForkNumber, Genesis, GenesisHash, GenesisVersion, LeaderCommit, LeaderPrepare, Msg, + MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, + ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, View, ViewNumber, WeightedValidator, }; -use crate::{node::SessionId, proto::validator as proto}; +use crate::proto::validator as proto; use anyhow::Context as _; use std::collections::BTreeMap; use zksync_consensus_crypto::ByteFmt; @@ -62,9 +66,19 @@ impl ProtoFmt for Genesis { } else { (vec![], GenesisVersion::CURRENT) }; + + let attesters: Vec<_> = r + .attesters + .iter() + .enumerate() + .map(|(i, v)| attester::PublicKey::read(v).context(i)) + .collect::>() + .context("validators")?; + Ok(Self { fork: 
read_required(&r.fork).context("fork")?, validators: Committee::new(validators.into_iter()).context("validators")?, + attesters: AttesterSet::new(attesters.into_iter()).context("attesters")?, version, }) } @@ -73,12 +87,14 @@ impl ProtoFmt for Genesis { GenesisVersion(0) => Self::Proto { fork: Some(self.fork.build()), validators: self.validators.iter().map(|v| v.key.build()).collect(), + attesters: self.attesters.iter().map(|v| v.build()).collect(), validators_v1: vec![], }, GenesisVersion(1..) => Self::Proto { fork: Some(self.fork.build()), validators: vec![], validators_v1: self.validators.iter().map(|v| v.build()).collect(), + attesters: self.attesters.iter().map(|v| v.build()).collect(), }, } } @@ -373,18 +389,6 @@ impl ProtoFmt for NetAddress { } } -impl ProtoFmt for L1BatchMsg { - type Proto = proto::L1BatchMsg; - - fn read(_r: &Self::Proto) -> anyhow::Result { - Ok(Self {}) - } - - fn build(&self) -> Self::Proto { - Self::Proto {} - } -} - impl ProtoFmt for Msg { type Proto = proto::Msg; @@ -394,7 +398,6 @@ impl ProtoFmt for Msg { T::Consensus(r) => Self::Consensus(ProtoFmt::read(r).context("Consensus")?), T::SessionId(r) => Self::SessionId(SessionId(r.clone())), T::NetAddress(r) => Self::NetAddress(ProtoFmt::read(r).context("NetAddress")?), - T::L1Batch(r) => Self::L1Batch(ProtoFmt::read(r).context("L1Batch")?), }) } @@ -405,7 +408,6 @@ impl ProtoFmt for Msg { Self::Consensus(x) => T::Consensus(x.build()), Self::SessionId(x) => T::SessionId(x.0.clone()), Self::NetAddress(x) => T::NetAddress(x.build()), - Self::L1Batch(x) => T::L1Batch(x.build()), }; Self::Proto { t: Some(t) } diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index 52639b9c..0123c9f9 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -1,6 +1,6 @@ //! Messages related to the consensus protocol. 
use super::{BlockNumber, LeaderCommit, LeaderPrepare, Msg, ReplicaCommit, ReplicaPrepare}; -use crate::validator; +use crate::{attester::AttesterSet, validator}; use anyhow::Context; use bit_vec::BitVec; use std::{collections::BTreeMap, fmt}; @@ -212,6 +212,8 @@ pub struct Genesis { pub version: GenesisVersion, /// Set of validators of the chain. pub validators: Committee, + /// Set of attesters of the chain. + pub attesters: AttesterSet, /// Fork of the chain to follow. pub fork: Fork, } @@ -232,6 +234,7 @@ impl Default for Genesis { Self { version: GenesisVersion::CURRENT, validators: Committee::default(), + attesters: AttesterSet::default(), fork: Fork::default(), } } diff --git a/node/libs/roles/src/validator/messages/mod.rs b/node/libs/roles/src/validator/messages/mod.rs index 3e7d9d5b..90189607 100644 --- a/node/libs/roles/src/validator/messages/mod.rs +++ b/node/libs/roles/src/validator/messages/mod.rs @@ -3,7 +3,6 @@ mod block; mod consensus; mod discovery; -mod l1_batch; mod leader_commit; mod leader_prepare; mod msg; @@ -15,7 +14,6 @@ mod tests; pub use block::*; pub use consensus::*; pub use discovery::*; -pub use l1_batch::*; pub use leader_commit::*; pub use leader_prepare::*; pub use msg::*; diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs index 8c2da3d5..c20d5f8b 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -1,5 +1,5 @@ //! Generic message types. 
-use super::{ConsensusMsg, L1BatchMsg, NetAddress}; +use super::{ConsensusMsg, NetAddress}; use crate::{ node::SessionId, validator::{self}, @@ -17,8 +17,6 @@ pub enum Msg { SessionId(SessionId), /// validator discovery NetAddress(NetAddress), - /// l1 batch - L1Batch(L1BatchMsg), } impl Msg { @@ -64,18 +62,6 @@ impl Variant for NetAddress { } } -impl Variant for L1BatchMsg { - fn insert(self) -> Msg { - Msg::L1Batch(self) - } - fn extract(msg: Msg) -> Result { - let Msg::L1Batch(this) = msg else { - return Err(BadVariantError); - }; - Ok(this) - } -} - /// Hash of a message. #[derive(Clone, Copy, PartialEq, Eq)] pub struct MsgHash(pub(crate) keccak256::Keccak256); diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index ea2c8e8b..5383bc90 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -1,9 +1,10 @@ +use crate::attester::{self, AttesterSet}; use crate::validator::*; use zksync_consensus_crypto::Text; use zksync_consensus_utils::enum_util::Variant as _; /// Hardcoded secret keys. -fn keys() -> Vec { +fn validator_keys() -> Vec { [ "validator:secret:bn254:27cb45b1670a1ae8d376a85821d51c7f91ebc6e32788027a84758441aaf0a987", "validator:secret:bn254:20132edc08a529e927f155e710ae7295a2a0d249f1b1f37726894d1d0d8f0d81", @@ -14,6 +15,17 @@ fn keys() -> Vec { .collect() } +fn attester_keys() -> Vec { + [ + "attester:secret:bn254:27cb45b1670a1ae8d376a85821d51c7f91ebc6e32788027a84758441aaf0a987", + "attester:secret:bn254:20132edc08a529e927f155e710ae7295a2a0d249f1b1f37726894d1d0d8f0d81", + "attester:secret:bn254:0946901f0a6650284726763b12de5da0f06df0016c8ec2144cf6b1903f1979a6", + ] + .iter() + .map(|raw| Text::new(raw).decode().unwrap()) + .collect() +} + /// Hardcoded payload. fn payload() -> Payload { Payload( @@ -33,11 +45,12 @@ fn fork() -> Fork { /// Hardcoded v0 genesis. 
fn genesis_v0() -> Genesis { Genesis { - validators: Committee::new(keys().iter().map(|k| WeightedValidator { + validators: Committee::new(validator_keys().iter().map(|k| WeightedValidator { key: k.public(), weight: 1, })) .unwrap(), + attesters: AttesterSet::new(attester_keys().iter().map(|k| k.public())).unwrap(), fork: fork(), version: GenesisVersion(0), } @@ -46,11 +59,12 @@ fn genesis_v0() -> Genesis { /// Hardcoded v1 genesis. fn genesis_v1() -> Genesis { Genesis { - validators: Committee::new(keys().iter().map(|k| WeightedValidator { + validators: Committee::new(validator_keys().iter().map(|k| WeightedValidator { key: k.public(), weight: 1, })) .unwrap(), + attesters: AttesterSet::new(attester_keys().iter().map(|k| k.public())).unwrap(), fork: fork(), version: GenesisVersion(1), } @@ -72,7 +86,7 @@ fn payload_hash_change_detector() { #[test] fn genesis_v0_hash_change_detector() { let want: GenesisHash = Text::new( - "genesis_hash:keccak256:9c9bfa303e8d2d451a7fadd327f5f1b957a37c84d7b27b9e1cf7b92fd83af7ae", + "genesis_hash:keccak256:d571e391b15e516f98afc1c286c62eeda54e56f23bf27c456be0c53ca45e6b32", ) .decode() .unwrap(); @@ -82,7 +96,7 @@ fn genesis_v0_hash_change_detector() { #[test] fn genesis_v1_hash_change_detector() { let want: GenesisHash = Text::new( - "genesis_hash:keccak256:6370cfce637395629f05599082993c446c2c66145d440287a985ac98ad210b41", + "genesis_hash:keccak256:6d8be786ae9becb70ba2cd5c53634a7b170ccb9930fba7730d96e0fbf7486756", ) .decode() .unwrap(); @@ -100,7 +114,7 @@ mod version1 { let hash: MsgHash = Text::new(hash).decode().unwrap(); assert!(hash == msg.hash(), "bad hash, want {:?}", msg.hash()); let sig: Signature = Text::new(sig).decode().unwrap(); - let key = keys()[0].clone(); + let key = validator_keys()[0].clone(); assert!( sig.verify_hash(&hash, &key.public()).is_ok(), "bad signature, want {:?}", @@ -138,7 +152,7 @@ mod version1 { let genesis = genesis_v1(); let replica_commit = replica_commit(); let mut x = 
CommitQC::new(replica_commit.clone(), &genesis); - for k in keys() { + for k in validator_keys() { x.add(&k.sign_msg(replica_commit.clone()), &genesis); } x @@ -165,7 +179,7 @@ mod version1 { let mut x = PrepareQC::new(view()); let genesis = genesis_v1(); let replica_prepare = replica_prepare(); - for k in keys() { + for k in validator_keys() { x.add(&k.sign_msg(replica_prepare.clone()), &genesis); } x diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 9af49731..1796a50b 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,4 +1,6 @@ //! Test-only utilities. +use crate::attester::{self, AttesterSet}; + use super::{ AggregateSignature, BlockHeader, BlockNumber, CommitQC, Committee, ConsensusMsg, FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, GenesisVersion, LeaderCommit, LeaderPrepare, Msg, @@ -22,18 +24,24 @@ pub struct Setup(SetupInner); impl Setup { /// New `Setup` with a given `fork`. 
pub fn new_with_fork(rng: &mut impl Rng, weights: Vec, fork: Fork) -> Self { - let keys: Vec = (0..weights.len()).map(|_| rng.gen()).collect(); + let validator_keys: Vec = (0..weights.len()).map(|_| rng.gen()).collect(); + let attester_keys: Vec = + (0..weights.len()).map(|_| rng.gen()).collect(); let genesis = Genesis { - validators: Committee::new(keys.iter().enumerate().map(|(i, k)| WeightedValidator { - key: k.public(), - weight: weights[i], + validators: Committee::new(validator_keys.iter().enumerate().map(|(i, k)| { + WeightedValidator { + key: k.public(), + weight: weights[i], + } })) .unwrap(), + attesters: AttesterSet::new(attester_keys.iter().map(|k| k.public())).unwrap(), fork, ..Default::default() }; Self(SetupInner { - keys, + validator_keys, + attester_keys, genesis, blocks: vec![], }) @@ -85,7 +93,7 @@ impl Setup { }; let msg = ReplicaCommit { view, proposal }; let mut justification = CommitQC::new(msg, &self.0.genesis); - for key in &self.0.keys { + for key in &self.0.validator_keys { justification.add( &key.sign_msg(justification.message.clone()), &self.0.genesis, @@ -115,7 +123,9 @@ impl Setup { #[derive(Debug, Clone)] pub struct SetupInner { /// Validators' secret keys. - pub keys: Vec, + pub validator_keys: Vec, + /// Attesters' secret keys. + pub attester_keys: Vec, /// Past blocks. pub blocks: Vec, /// Genesis config. 
@@ -201,6 +211,7 @@ impl Distribution for Standard { fn sample(&self, rng: &mut R) -> Genesis { Genesis { validators: rng.gen(), + attesters: rng.gen(), fork: rng.gen(), version: rng.gen(), } diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index d9975ed0..c2e3950d 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -1,5 +1,5 @@ use super::*; -use crate::validator::testonly::Setup; +use crate::{attester::AttesterSet, validator::testonly::Setup}; use assert_matches::assert_matches; use rand::{seq::SliceRandom, Rng}; use std::vec; @@ -174,7 +174,7 @@ fn make_replica_commit(rng: &mut impl Rng, view: ViewNumber, setup: &Setup) -> R fn make_commit_qc(rng: &mut impl Rng, view: ViewNumber, setup: &Setup) -> CommitQC { let mut qc = CommitQC::new(make_replica_commit(rng, view, setup), &setup.genesis); - for key in &setup.keys { + for key in &setup.validator_keys { qc.add(&key.sign_msg(qc.message.clone()), &setup.genesis); } qc @@ -205,15 +205,16 @@ fn test_commit_qc() { let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { validators: Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + attesters: AttesterSet::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(), fork: setup1.genesis.fork.clone(), ..Default::default() }; let validator_weight = setup1.genesis.validators.total_weight() / 6; - for i in 0..setup1.keys.len() + 1 { + for i in 0..setup1.validator_keys.len() + 1 { let view = rng.gen(); let mut qc = CommitQC::new(make_replica_commit(rng, view, &setup1), &setup1.genesis); - for key in &setup1.keys[0..i] { + for key in &setup1.validator_keys[0..i] { qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis); } let expected_weight = i as u64 * validator_weight; @@ -243,6 +244,7 @@ fn test_prepare_qc() { let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { validators: 
Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + attesters: AttesterSet::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(), fork: setup1.genesis.fork.clone(), ..Default::default() }; @@ -252,9 +254,9 @@ fn test_prepare_qc() { .map(|_| make_replica_prepare(rng, view, &setup1)) .collect(); - for n in 0..setup1.keys.len() + 1 { + for n in 0..setup1.validator_keys.len() + 1 { let mut qc = PrepareQC::new(msgs[0].view.clone()); - for key in &setup1.keys[0..n] { + for key in &setup1.validator_keys[0..n] { qc.add( &key.sign_msg(msgs.choose(rng).unwrap().clone()), &setup1.genesis, @@ -290,7 +292,7 @@ fn test_validator_committee_weights() { let msg = make_replica_prepare(rng, view, &setup); let mut qc = PrepareQC::new(msg.view.clone()); for (n, weight) in sums.iter().enumerate() { - let key = &setup.keys[n]; + let key = &setup.validator_keys[n]; qc.add(&key.sign_msg(msg.clone()), &setup.genesis); let signers = &qc.map[&msg]; assert_eq!(setup.genesis.validators.weight(signers), *weight); diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 49bea272..609f2628 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -29,7 +29,7 @@ fn generate_consensus_nodes(nodes: usize, seed_nodes_amount: Option) -> V let rng = &mut rand::thread_rng(); let setup = validator::testonly::Setup::new(rng, nodes); - let validator_keys = setup.keys.clone(); + let validator_keys = setup.validator_keys.clone(); // Each node will have `gossip_peers` outbound peers. 
let peers = 2; diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index b39d2196..287bc842 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -57,7 +57,7 @@ fn main() -> anyhow::Result<()> { let rng = &mut rand::thread_rng(); let setup = validator::testonly::Setup::new(rng, validator_count); - let validator_keys = setup.keys.clone(); + let validator_keys = setup.validator_keys.clone(); // Each node will have `gossip_peers` outbound peers. let nodes = addrs.len(); From 8d7650986a74233417a35577a7e217433b1b2577 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Thu, 25 Apr 2024 15:16:49 +0200 Subject: [PATCH 21/79] Rewritten sync_blocks actor to maximize utilization. (#101) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous implementation severely limited the total throughput. --------- Co-authored-by: Bruno França --- .github/CODEOWNERS | 2 +- docs/architecture.md | 7 +- node/Cargo.lock | 309 +++++------ node/Cargo.toml | 2 - node/actors/bft/src/testonly/node.rs | 4 - node/actors/executor/Cargo.toml | 1 - node/actors/executor/src/io.rs | 23 - node/actors/executor/src/lib.rs | 26 +- node/actors/network/src/config.rs | 10 +- node/actors/network/src/consensus/mod.rs | 6 +- node/actors/network/src/consensus/tests.rs | 1 + node/actors/network/src/gossip/fetch.rs | 104 ++++ node/actors/network/src/gossip/mod.rs | 79 +-- node/actors/network/src/gossip/runner.rs | 129 +++-- node/actors/network/src/gossip/testonly.rs | 147 +++++ node/actors/network/src/gossip/tests/fetch.rs | 313 +++++++++++ .../src/gossip/{tests.rs => tests/mod.rs} | 215 +------- .../network/src/gossip/tests/syncing.rs | 304 ++++++++++ node/actors/network/src/io.rs | 56 +- node/actors/network/src/lib.rs | 25 +- .../actors/network/src/mux/reusable_stream.rs | 16 +- node/actors/network/src/mux/tests/mod.rs | 39 +- node/actors/network/src/rpc/mod.rs | 103 ++-- 
node/actors/network/src/rpc/tests.rs | 9 +- node/actors/network/src/testonly.rs | 2 + node/actors/sync_blocks/Cargo.toml | 28 - node/actors/sync_blocks/src/config.rs | 56 -- node/actors/sync_blocks/src/io.rs | 29 - node/actors/sync_blocks/src/lib.rs | 57 -- node/actors/sync_blocks/src/peers/events.rs | 18 - node/actors/sync_blocks/src/peers/mod.rs | 224 -------- .../sync_blocks/src/peers/tests/basics.rs | 522 ------------------ .../sync_blocks/src/peers/tests/fakes.rs | 141 ----- .../actors/sync_blocks/src/peers/tests/mod.rs | 157 ------ .../src/peers/tests/multiple_peers.rs | 345 ------------ .../sync_blocks/src/tests/end_to_end.rs | 393 ------------- node/actors/sync_blocks/src/tests/mod.rs | 24 - node/deny.toml | 6 +- node/libs/concurrency/src/ctx/mod.rs | 23 +- node/libs/concurrency/src/sync/mod.rs | 2 + node/libs/concurrency/src/time.rs | 1 + 41 files changed, 1273 insertions(+), 2685 deletions(-) create mode 100644 node/actors/network/src/gossip/fetch.rs create mode 100644 node/actors/network/src/gossip/testonly.rs create mode 100644 node/actors/network/src/gossip/tests/fetch.rs rename node/actors/network/src/gossip/{tests.rs => tests/mod.rs} (67%) create mode 100644 node/actors/network/src/gossip/tests/syncing.rs delete mode 100644 node/actors/sync_blocks/Cargo.toml delete mode 100644 node/actors/sync_blocks/src/config.rs delete mode 100644 node/actors/sync_blocks/src/io.rs delete mode 100644 node/actors/sync_blocks/src/lib.rs delete mode 100644 node/actors/sync_blocks/src/peers/events.rs delete mode 100644 node/actors/sync_blocks/src/peers/mod.rs delete mode 100644 node/actors/sync_blocks/src/peers/tests/basics.rs delete mode 100644 node/actors/sync_blocks/src/peers/tests/fakes.rs delete mode 100644 node/actors/sync_blocks/src/peers/tests/mod.rs delete mode 100644 node/actors/sync_blocks/src/peers/tests/multiple_peers.rs delete mode 100644 node/actors/sync_blocks/src/tests/end_to_end.rs delete mode 100644 node/actors/sync_blocks/src/tests/mod.rs diff --git 
a/.github/CODEOWNERS b/.github/CODEOWNERS index 078f73a4..73923fa6 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,6 +1,6 @@ CODEOWNERS @brunoffranca -/node/actors/consensus/ @brunoffranca +/node/actors/bft/ @brunoffranca /node/actors/network/ @pompon0 /node/libs/concurrency/ @pompon0 diff --git a/docs/architecture.md b/docs/architecture.md index a9bbfc63..8907d8fd 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -12,9 +12,7 @@ The actor crates are where the vast majority of the work is done. Each of them m - the `executor` crate is responsible for parsing the configuration parameters given by the user, and initializing the actors and the storage. It's basically the bootloader for the node. It also dispatches messages between the rest of the actors. They all send messages to the executor and it then converts and forwards the messages to the desired destination. This improves the encapsulation of the actors. -- the `network` crate which maintains a pool of outbound and inbound connections to other nodes. - -- the `sync_blocks` crate implements a block syncing mechanism for nodes. It enables nodes to exchange blocks with each other in a peer-to-peer network, allowing them to keep a copy of the blockchain stored in their local storage up-to-date. +- the `network` crate which maintains a pool of outbound and inbound connections to other nodes. It also implements a syncing mechanism for nodes (for blocks, batches, attester signatures, etc). ### Library crates @@ -44,8 +42,7 @@ This section provides a physical map of folders & files in this repository. - `/bft`: The consensus actor. - `/executor`: The actor orchestrator. - - `/network`: The networking actor. - - `/sync_blocks`: The block syncing actor. + - `/network`: The network actor. - `/lib`: All the library crates used as dependencies of the actor crates above. 
diff --git a/node/Cargo.lock b/node/Cargo.lock index e288e042..7739f03b 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -151,9 +151,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "assert_matches" @@ -163,13 +163,13 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -216,6 +216,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "base64ct" version = "1.6.0" @@ -249,7 +255,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -320,9 +326,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byteorder" @@ -355,12 +361,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.90" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -404,15 +411,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -492,10 +499,10 @@ version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -659,7 +666,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -683,7 +690,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -694,7 +701,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -792,9 +799,9 @@ dependencies 
= [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elsa" @@ -857,9 +864,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" [[package]] name = "fixedbitset" @@ -944,7 +951,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -989,9 +996,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -1041,9 +1048,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -1059,12 +1066,6 @@ dependencies = [ "allocator-api2", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -1287,9 +1288,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.11.0" +version = 
"0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -1302,9 +1303,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -1534,7 +1535,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1588,7 +1589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -1635,7 +1636,7 @@ checksum = "adf157a4dc5a29b7b464aa8fe7edeff30076e07e13646a1c3874f58477dc99f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1680,7 +1681,7 @@ dependencies = [ "proc-macro2", "quote", "regex-syntax 0.6.29", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1737,7 +1738,7 @@ checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1774,9 +1775,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "nom" @@ -1934,11 +1935,11 @@ checksum = 
"19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "serde", ] @@ -1979,7 +1980,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2020,7 +2021,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2132,19 +2133,19 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" +checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" dependencies = [ "proc-macro2", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -2169,14 +2170,14 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "prost" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ "bytes", "prost-derive", @@ -2184,13 +2185,13 @@ 
dependencies = [ [[package]] name = "prost-build" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" +checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" dependencies = [ "bytes", - "heck 0.4.1", - "itertools 0.11.0", + "heck", + "itertools 0.12.1", "log", "multimap", "once_cell", @@ -2199,22 +2200,21 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.58", + "syn 2.0.60", "tempfile", - "which", ] [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2235,9 +2235,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ "prost", ] @@ -2280,9 +2280,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -2490,9 +2490,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum 
= "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -2503,9 +2503,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring", @@ -2649,9 +2649,9 @@ checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -2668,13 +2668,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2690,9 +2690,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -2763,9 +2763,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -2899,9 +2899,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -2937,7 +2937,7 @@ checksum = "f9b53c7124dd88026d5d98a1eb1fd062a578b7d783017c9298825526c7fb6427" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2956,22 +2956,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2995,9 +2995,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "num-conv", @@ -3074,7 +3074,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3184,7 +3184,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3369,7 +3369,7 @@ source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3418,7 +3418,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -3440,7 +3440,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3461,18 +3461,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - [[package]] name = "winapi" version = "0.3.9" @@ -3491,11 +3479,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "134306a13c5647ad6453e8deaec55d3a44d6021970129e6188735e74bf546697" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -3510,7 +3498,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -3528,7 +3516,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -3548,17 +3536,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -3569,9 +3558,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -3581,9 +3570,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -3593,9 +3582,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" 
[[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -3605,9 +3600,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -3617,9 +3612,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -3629,9 +3624,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -3641,9 +3636,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] 
name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "yansi" @@ -3668,14 +3663,14 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "63381fa6624bf92130a6b87c0d07380116f80b565c42cf0d754136f0238359ef" dependencies = [ "zeroize_derive", ] @@ -3688,7 +3683,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3766,7 +3761,6 @@ dependencies = [ "zksync_consensus_network", "zksync_consensus_roles", "zksync_consensus_storage", - "zksync_consensus_sync_blocks", "zksync_consensus_utils", "zksync_protobuf", ] @@ -3840,25 +3834,6 @@ dependencies = [ "zksync_protobuf_build", ] -[[package]] -name = "zksync_consensus_sync_blocks" -version = "0.1.0" -dependencies = [ - "anyhow", - "assert_matches", - "async-trait", - "rand 0.8.5", - "test-casing", - "thiserror", - "tokio", - "tracing", - "zksync_concurrency", - "zksync_consensus_network", - "zksync_consensus_roles", - "zksync_consensus_storage", - "zksync_consensus_utils", -] - [[package]] name = "zksync_consensus_tools" version = "0.1.0" @@ -3927,14 +3902,14 @@ name = "zksync_protobuf_build" version = "0.1.0" dependencies = [ "anyhow", - "heck 0.5.0", + "heck", "prettyplease", "proc-macro2", "prost-build", "prost-reflect", "protox", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] diff --git a/node/Cargo.toml b/node/Cargo.toml 
index 32aa5c97..c42f3ac3 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -3,7 +3,6 @@ members = [ "actors/bft", "actors/executor", "actors/network", - "actors/sync_blocks", "libs/concurrency", "libs/crypto", "libs/protobuf", @@ -30,7 +29,6 @@ zksync_consensus_executor = { path = "actors/executor" } zksync_consensus_network = { path = "actors/network" } zksync_consensus_roles = { path = "libs/roles" } zksync_consensus_storage = { path = "libs/storage" } -zksync_consensus_sync_blocks = { path = "actors/sync_blocks" } zksync_consensus_tools = { path = "tools" } zksync_consensus_utils = { path = "libs/utils" } diff --git a/node/actors/bft/src/testonly/node.rs b/node/actors/bft/src/testonly/node.rs index d6a25774..d08a52d0 100644 --- a/node/actors/bft/src/testonly/node.rs +++ b/node/actors/bft/src/testonly/node.rs @@ -81,10 +81,6 @@ impl Node { network::io::OutputMessage::Consensus(req) => { con_send.send(io::InputMessage::Network(req)); } - network::io::OutputMessage::SyncBlocks(_) => { - // Drop message related to block syncing; the nodes should work fine - // without them. 
- } } } Ok(()) diff --git a/node/actors/executor/Cargo.toml b/node/actors/executor/Cargo.toml index 3fec785b..be95dcb3 100644 --- a/node/actors/executor/Cargo.toml +++ b/node/actors/executor/Cargo.toml @@ -13,7 +13,6 @@ zksync_consensus_crypto.workspace = true zksync_consensus_network.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true -zksync_consensus_sync_blocks.workspace = true zksync_consensus_utils.workspace = true zksync_protobuf.workspace = true diff --git a/node/actors/executor/src/io.rs b/node/actors/executor/src/io.rs index 5379092f..fe8df10a 100644 --- a/node/actors/executor/src/io.rs +++ b/node/actors/executor/src/io.rs @@ -10,9 +10,6 @@ use zksync_consensus_bft::io::{ use zksync_consensus_network::io::{ InputMessage as NetworkInputMessage, OutputMessage as NetworkOutputMessage, }; -use zksync_consensus_sync_blocks::io::{ - InputMessage as SyncBlocksInputMessage, OutputMessage as SyncBlocksOutputMessage, -}; use zksync_consensus_utils::pipe::DispatcherPipe; /// The IO dispatcher, it is the main struct to handle actor messages. It simply contains a sender and a receiver for @@ -21,8 +18,6 @@ use zksync_consensus_utils::pipe::DispatcherPipe; pub(super) struct Dispatcher { consensus_input: channel::UnboundedSender, consensus_output: channel::UnboundedReceiver, - sync_blocks_input: channel::UnboundedSender, - sync_blocks_output: channel::UnboundedReceiver, network_input: channel::UnboundedSender, network_output: channel::UnboundedReceiver, } @@ -31,14 +26,11 @@ impl Dispatcher { /// Creates a new IO Dispatcher. 
pub(super) fn new( consensus_pipe: DispatcherPipe, - sync_blocks_pipe: DispatcherPipe, network_pipe: DispatcherPipe, ) -> Self { Dispatcher { consensus_input: consensus_pipe.send, consensus_output: consensus_pipe.recv, - sync_blocks_input: sync_blocks_pipe.send, - sync_blocks_output: sync_blocks_pipe.recv, network_input: network_pipe.send, network_output: network_pipe.recv, } @@ -60,17 +52,6 @@ impl Dispatcher { Ok(()) }); - s.spawn(async { - while let Ok(msg) = self.sync_blocks_output.recv(ctx).await { - match msg { - SyncBlocksOutputMessage::Network(message) => { - self.network_input.send(message.into()); - } - } - } - Ok(()) - }); - // Start a task to handle the messages from the network actor. s.spawn(async { while let Ok(msg) = self.network_output.recv(ctx).await { @@ -79,10 +60,6 @@ impl Dispatcher { self.consensus_input .send(ConsensusInputMessage::Network(message)); } - NetworkOutputMessage::SyncBlocks(message) => { - self.sync_blocks_input - .send(SyncBlocksInputMessage::Network(message)); - } } } Ok(()) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 9c4e4688..d75d8f1d 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -11,7 +11,6 @@ use zksync_consensus_bft as bft; use zksync_consensus_network as network; use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::{BlockStore, ReplicaStore}; -use zksync_consensus_sync_blocks as sync_blocks; use zksync_consensus_utils::pipe; use zksync_protobuf::kB; @@ -95,6 +94,7 @@ impl Executor { validator_key: self.validator.as_ref().map(|v| v.key.clone()), ping_timeout: Some(time::Duration::seconds(10)), max_block_size: self.config.max_payload_size.saturating_add(kB), + max_block_queue_size: 20, tcp_accept_rate: limiter::Rate { burst: 10, refresh: time::Duration::milliseconds(100), @@ -126,27 +126,13 @@ impl Executor { // Generate the communication pipes. We have one for each actor. 
let (consensus_actor_pipe, consensus_dispatcher_pipe) = pipe::new(); - let (sync_blocks_actor_pipe, sync_blocks_dispatcher_pipe) = pipe::new(); let (network_actor_pipe, network_dispatcher_pipe) = pipe::new(); // Create the IO dispatcher. - let mut dispatcher = Dispatcher::new( - consensus_dispatcher_pipe, - sync_blocks_dispatcher_pipe, - network_dispatcher_pipe, - ); + let mut dispatcher = Dispatcher::new(consensus_dispatcher_pipe, network_dispatcher_pipe); tracing::debug!("Starting actors in separate threads."); scope::run!(ctx, |ctx, s| async { s.spawn_blocking(|| dispatcher.run(ctx).context("IO Dispatcher stopped")); - s.spawn(async { - let (net, runner) = network::Network::new( - network_config, - self.block_store.clone(), - network_actor_pipe, - ); - net.register_metrics(); - runner.run(ctx).await.context("Network stopped") - }); if let Some(validator) = self.validator { s.spawn(async { let validator = validator; @@ -162,10 +148,10 @@ impl Executor { .context("Consensus stopped") }); } - sync_blocks::Config::new() - .run(ctx, sync_blocks_actor_pipe, self.block_store.clone()) - .await - .context("Syncing blocks stopped") + let (net, runner) = + network::Network::new(network_config, self.block_store.clone(), network_actor_pipe); + net.register_metrics(); + runner.run(ctx).await.context("Network stopped") }) .await } diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index ab297458..e757a8fc 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -14,8 +14,10 @@ pub struct RpcConfig { pub push_validator_addrs_rate: limiter::Rate, /// Max rate of sending/receiving push_block_store_state messages. pub push_block_store_state_rate: limiter::Rate, - /// Max rate of sending/receiving get_block RPCs. + /// Max rate of sending/receiving `get_block` RPCs. pub get_block_rate: limiter::Rate, + /// Timeout for the `get_block` RPC. 
+ pub get_block_timeout: Option, /// Max rate of sending/receiving consensus messages. pub consensus_rate: limiter::Rate, } @@ -35,6 +37,7 @@ impl Default for RpcConfig { burst: 10, refresh: time::Duration::milliseconds(100), }, + get_block_timeout: Some(time::Duration::seconds(10)), consensus_rate: limiter::Rate { burst: 10, refresh: time::Duration::ZERO, @@ -84,4 +87,9 @@ pub struct Config { pub tcp_accept_rate: limiter::Rate, /// Rate limiting config for RPCs. pub rpc: RpcConfig, + /// Maximum number of not-yet-persisted blocks fetched from the network. + /// If reached, network actor will wait for more blocks to get persisted + /// before fetching the next ones. It is useful for limiting memory consumption + /// when the block persisting rate is low. + pub max_block_queue_size: usize, } diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 1acf0c24..bbd7bd87 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -175,8 +175,8 @@ impl Network { tracing::info!("peer = {peer:?}"); let res = scope::run!(ctx, |ctx, s| async { let mut service = rpc::Service::new() - .add_server(rpc::ping::Server, rpc::ping::RATE) - .add_server(self, self.gossip.cfg.rpc.consensus_rate); + .add_server(ctx, rpc::ping::Server, rpc::ping::RATE) + .add_server(ctx, self, self.gossip.cfg.rpc.consensus_rate); if let Some(ping_timeout) = &self.gossip.cfg.ping_timeout { let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); service = service.add_client(&ping_client); @@ -215,7 +215,7 @@ impl Network { rpc::Client::::new(ctx, self.gossip.cfg.rpc.consensus_rate); let res = scope::run!(ctx, |ctx, s| async { let mut service = rpc::Service::new() - .add_server(rpc::ping::Server, rpc::ping::RATE) + .add_server(ctx, rpc::ping::Server, rpc::ping::RATE) .add_client(&consensus_cli); if let Some(ping_timeout) = &self.gossip.cfg.ping_timeout { let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); 
diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index aaf4f21e..3b3c5bf6 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -1,3 +1,4 @@ +#![allow(irrefutable_let_patterns)] use super::*; use crate::{io, metrics, preface, rpc, testonly}; use assert_matches::assert_matches; diff --git a/node/actors/network/src/gossip/fetch.rs b/node/actors/network/src/gossip/fetch.rs new file mode 100644 index 00000000..115852bd --- /dev/null +++ b/node/actors/network/src/gossip/fetch.rs @@ -0,0 +1,104 @@ +#![allow(unused)] +use anyhow::Context as _; +use std::collections::BTreeMap; +use zksync_concurrency::{ctx, oneshot, scope, sync}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::BlockStoreState; + +/// A block fetching request. +type Call = (validator::BlockNumber, oneshot::Sender<()>); + +/// Inner state of the `Queue`. +type Inner = BTreeMap>; + +/// Queue of block fetch request. +pub(crate) struct Queue(sync::watch::Sender); + +impl Default for Queue { + fn default() -> Self { + Self(sync::watch::channel(Inner::default()).0) + } +} + +impl Queue { + /// Requests a block from peers and waits until it is stored. + /// Note: in the current implementation concurrent calls for the same block number are + /// unsupported - second call will override the first call. + pub(crate) async fn request( + &self, + ctx: &ctx::Ctx, + n: validator::BlockNumber, + ) -> ctx::OrCanceled<()> { + loop { + let (send, recv) = oneshot::channel(); + self.0.send_if_modified(|x| { + x.insert(n, send); + // Send iff the lowest requested block changed. + x.first_key_value().unwrap().0 == &n + }); + match recv.recv_or_disconnected(ctx).await { + // Return if completed. + Ok(Ok(())) => return Ok(()), + // Retry if failed. + Ok(Err(sync::Disconnected)) => continue, + // Remove the request from the queue if canceled. 
+ Err(ctx::Canceled) => { + self.0.send_if_modified(|x| { + let modified = x.first_key_value().map_or(false, |(k, _)| k == &n); + x.remove(&n); + // Send iff the lowest requested block changed. + modified + }); + return Err(ctx::Canceled); + } + } + } + } + + /// Accepts a block fetch request, which is contained in the available blocks range. + /// Caller is responsible for fetching the block and adding it to the block store. + pub(crate) async fn accept( + &self, + ctx: &ctx::Ctx, + available: &mut sync::watch::Receiver, + ) -> ctx::OrCanceled { + let sub = &mut self.0.subscribe(); + while ctx.is_active() { + // Wait for the lowest requested block to be available. + // This scope is always cancelled, so we ignore the result. + let mut block_number = None; + let _: Result<(), _> = scope::run!(ctx, |ctx, s| async { + if let Some(n) = sub.borrow_and_update().first_key_value().map(|x| *x.0) { + let n = ctx::NoCopy(n); + s.spawn::<()>(async { + let n = n; + sync::wait_for(ctx, available, |a| a.contains(n.0)).await?; + block_number = Some(n.0); + Err(ctx::Canceled) + }); + } + // If the lowest requested block changes, we need to restart the wait. + sync::changed(ctx, sub).await?; + Err(ctx::Canceled) + }) + .await; + let Some(block_number) = block_number else { + continue; + }; + + // Remove the request from the queue. + let mut res = None; + self.0.send_if_modified(|x| { + res = x.remove_entry(&block_number); + // Send iff the lowest requested block changed. + res.is_some() && !x.is_empty() + }); + // It may happen that someone else accepts our request faster. + // In this case we need to wait again. + if let Some(res) = res { + return Ok(res); + } + } + Err(ctx::Canceled) + } +} diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 2b88dbb2..e8bf588c 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -12,42 +12,38 @@ //! 
Static connections constitute a rigid "backbone" of the gossip network, which is insensitive to //! eclipse attack. Dynamic connections are supposed to improve the properties of the gossip //! network graph (minimize its diameter, increase connectedness). -use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, rpc, Config}; -use anyhow::Context as _; +use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; use std::sync::{atomic::AtomicUsize, Arc}; +pub(crate) use validator_addrs::*; +use zksync_concurrency::{ctx, ctx::channel, scope, sync}; +use zksync_consensus_roles::{node, validator}; +use zksync_consensus_storage::BlockStore; +mod fetch; mod handshake; mod runner; #[cfg(test)] +mod testonly; +#[cfg(test)] mod tests; mod validator_addrs; -pub(crate) use validator_addrs::*; -use zksync_concurrency::{ctx, ctx::channel}; -use zksync_consensus_roles::{node, validator}; -use zksync_consensus_storage::BlockStore; -use zksync_protobuf::kB; - -/// State of the gossip connection. -pub(crate) struct Connection { - /// `get_block` rpc client. - pub(crate) get_block: rpc::Client, -} - /// Gossip network state. pub(crate) struct Network { /// Gossip network configuration. pub(crate) cfg: Config, /// Currently open inbound connections. - pub(crate) inbound: PoolWatch>, + pub(crate) inbound: PoolWatch, /// Currently open outbound connections. - pub(crate) outbound: PoolWatch>, + pub(crate) outbound: PoolWatch, /// Current state of knowledge about validators' endpoints. pub(crate) validator_addrs: ValidatorAddrsWatch, /// Block store to serve `get_block` requests from. pub(crate) block_store: Arc, /// Output pipe of the network actor. pub(crate) sender: channel::UnboundedSender, + /// Queue of block fetching requests. + pub(crate) fetch_queue: fetch::Queue, /// TESTONLY: how many time push_validator_addrs rpc was called by the peers. 
pub(crate) push_validator_addrs_calls: AtomicUsize, } @@ -67,8 +63,9 @@ impl Network { ), outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), validator_addrs: ValidatorAddrsWatch::default(), - block_store, cfg, + fetch_queue: fetch::Queue::default(), + block_store, push_validator_addrs_calls: 0.into(), }) } @@ -78,26 +75,32 @@ impl Network { self.block_store.genesis() } - /// Sends a GetBlock RPC to the given peer. - pub(crate) async fn get_block( - &self, - ctx: &ctx::Ctx, - recipient: &node::PublicKey, - number: validator::BlockNumber, - ) -> anyhow::Result> { - let outbound = self.outbound.current(); - let inbound = self.inbound.current(); - Ok(outbound - .get(recipient) - .or(inbound.get(recipient)) - .context("recipient is unreachable")? - .get_block - .call( - ctx, - &rpc::get_block::Req(number), - self.cfg.max_block_size.saturating_add(kB), - ) - .await? - .0) + /// Task fetching blocks from peers which are not present in storage. + pub(crate) async fn run_block_fetcher(&self, ctx: &ctx::Ctx) { + let sem = sync::Semaphore::new(self.cfg.max_block_queue_size); + let _: ctx::OrCanceled<()> = scope::run!(ctx, |ctx, s| async { + let mut next = self.block_store.queued().next(); + loop { + let permit = sync::acquire(ctx, &sem).await?; + let number = ctx::NoCopy(next); + next = next + 1; + // Fetch a block asynchronously. + s.spawn(async { + let _permit = permit; + let number = number.into(); + let _: ctx::OrCanceled<()> = scope::run!(ctx, |ctx, s| async { + s.spawn_bg(self.fetch_queue.request(ctx, number)); + // Cancel fetching as soon as block is queued for storage. + self.block_store.wait_until_queued(ctx, number).await?; + Err(ctx::Canceled) + }) + .await; + // Wait until the block is actually persisted, so that the amount of blocks + // stored in memory is bounded. 
+ self.block_store.wait_until_persisted(ctx, number).await + }); + } + }) + .await; } } diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index f810d1a6..10e06755 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -1,13 +1,12 @@ -use super::{handshake, Connection, Network, ValidatorAddrs}; -use crate::{io, noise, preface, rpc}; +use super::{handshake, Network, ValidatorAddrs}; +use crate::{noise, preface, rpc}; use anyhow::Context as _; use async_trait::async_trait; use rand::seq::SliceRandom; -use std::sync::{atomic::Ordering, Arc}; -use tracing::Instrument as _; -use zksync_concurrency::{ctx, net, oneshot, scope, sync}; +use std::sync::atomic::Ordering; +use zksync_concurrency::{ctx, net, scope, sync}; use zksync_consensus_roles::node; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, BlockStoreState}; use zksync_protobuf::kB; struct PushValidatorAddrsServer<'a>(&'a Network); @@ -33,32 +32,36 @@ impl rpc::Handler for PushValidatorAddrsServer<' } } -#[derive(Clone, Copy)] struct PushBlockStoreStateServer<'a> { - peer: &'a node::PublicKey, + state: sync::watch::Sender, net: &'a Network, } +impl<'a> PushBlockStoreStateServer<'a> { + fn new(net: &'a Network) -> Self { + Self { + state: sync::watch::channel(BlockStoreState { + first: net.genesis().fork.first_block, + last: None, + }) + .0, + net, + } + } +} + #[async_trait] -impl rpc::Handler for PushBlockStoreStateServer<'_> { +impl rpc::Handler for &PushBlockStoreStateServer<'_> { fn max_req_size(&self) -> usize { 10 * kB } async fn handle( &self, - ctx: &ctx::Ctx, + _ctx: &ctx::Ctx, req: rpc::push_block_store_state::Req, ) -> anyhow::Result<()> { - let (response, response_receiver) = oneshot::channel(); - let message = io::SyncBlocksRequest::UpdatePeerSyncState { - peer: self.peer.clone(), - state: req.0, - response, - }; - self.net.sender.send(message.into()); - // TODO(gprusak): 
disconnection means that the message was rejected OR - // that `SyncBlocks` actor is missing (in tests), which leads to unnecessary disconnects. - let _ = response_receiver.recv_or_disconnected(ctx).await?; + req.0.verify(self.net.genesis())?; + self.state.send_replace(req.0); Ok(()) } } @@ -79,13 +82,7 @@ impl rpc::Handler for &BlockStore { impl Network { /// Manages lifecycle of a single connection. - async fn run_stream( - &self, - ctx: &ctx::Ctx, - peer: &node::PublicKey, - stream: noise::Stream, - conn: &Connection, - ) -> anyhow::Result<()> { + async fn run_stream(&self, ctx: &ctx::Ctx, stream: noise::Stream) -> anyhow::Result<()> { let push_validator_addrs_client = rpc::Client::::new( ctx, self.cfg.rpc.push_validator_addrs_rate, @@ -95,22 +92,26 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); - let push_block_store_state_server = PushBlockStoreStateServer { peer, net: self }; + let push_block_store_state_server = PushBlockStoreStateServer::new(self); + let get_block_client = + rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); scope::run!(ctx, |ctx, s| async { let mut service = rpc::Service::new() .add_client(&push_validator_addrs_client) .add_server( + ctx, push_validator_addrs_server, self.cfg.rpc.push_validator_addrs_rate, ) .add_client(&push_block_store_state_client) .add_server( - push_block_store_state_server, + ctx, + &push_block_store_state_server, self.cfg.rpc.push_block_store_state_rate, ) - .add_client(&conn.get_block) - .add_server(&*self.block_store, self.cfg.rpc.get_block_rate) - .add_server(rpc::ping::Server, rpc::ping::RATE); + .add_client(&get_block_client) + .add_server(ctx, &*self.block_store, self.cfg.rpc.get_block_rate) + .add_server(ctx, rpc::ping::Server, rpc::ping::RATE); if let Some(ping_timeout) = &self.cfg.ping_timeout { let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); @@ -134,8 +135,8 @@ impl Network { } }); + // Push validator addrs updates to peer. 
s.spawn::<()>(async { - // Push validator addrs updates to peer. let mut old = ValidatorAddrs::default(); let mut sub = self.validator_addrs.subscribe(); sub.mark_changed(); @@ -151,6 +152,51 @@ impl Network { } }); + // Perform get_block calls to peer. + s.spawn::<()>(async { + let state = &mut push_block_store_state_server.state.subscribe(); + loop { + let call = get_block_client.reserve(ctx).await?; + let (req, send_resp) = self.fetch_queue.accept(ctx, state).await?; + let req = rpc::get_block::Req(req); + s.spawn(async { + let req = req; + // Failing to fetch a block causes a disconnect: + // - peer predeclares which blocks are available and race condition + // with block pruning should be very rare, so we can consider + // an empty response to be offending + // - a stream for the call has been already reserved, + // so the peer is expected to answer immediately. The timeout + // should be high enough to accommodate network hiccups + // - a disconnect is not a ban, so the peer is free to try to + // reconnect. + async { + let ctx_with_timeout = + self.cfg.rpc.get_block_timeout.map(|t| ctx.with_timeout(t)); + let ctx = ctx_with_timeout.as_ref().unwrap_or(ctx); + let block = call + .call(ctx, &req, self.cfg.max_block_size.saturating_add(kB)) + .await? + .0 + .context("empty response")?; + anyhow::ensure!(block.number() == req.0, "received wrong block"); + // Storing the block will fail in case block is invalid. + self.block_store + .queue_block(ctx, block) + .await + .context("queue_block()")?; + tracing::info!("fetched block {}", req.0); + // Send a response that fetching was successful. + // Ignore disconnection error. 
+ let _ = send_resp.send(()); + anyhow::Ok(()) + } + .await + .with_context(|| format!("get_block({})", req.0)) + }); + } + }); + service.run(ctx, stream).await?; Ok(()) }) @@ -168,11 +214,8 @@ impl Network { let peer = handshake::inbound(ctx, &self.cfg.gossip, self.genesis().hash(), &mut stream).await?; tracing::info!("peer = {peer:?}"); - let conn = Arc::new(Connection { - get_block: rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate), - }); - self.inbound.insert(peer.clone(), conn.clone()).await?; - let res = self.run_stream(ctx, &peer, stream, &conn).await; + self.inbound.insert(peer.clone(), ()).await?; + let res = self.run_stream(ctx, stream).await; self.inbound.remove(&peer).await; res } @@ -202,14 +245,8 @@ impl Network { ) .await?; tracing::info!("peer = {peer:?}"); - let conn = Arc::new(Connection { - get_block: rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate), - }); - self.outbound.insert(peer.clone(), conn.clone()).await?; - let res = self - .run_stream(ctx, peer, stream, &conn) - .instrument(tracing::info_span!("out", ?addr)) - .await; + self.outbound.insert(peer.clone(), ()).await?; + let res = self.run_stream(ctx, stream).await; self.outbound.remove(peer).await; res } diff --git a/node/actors/network/src/gossip/testonly.rs b/node/actors/network/src/gossip/testonly.rs new file mode 100644 index 00000000..fcf8bee4 --- /dev/null +++ b/node/actors/network/src/gossip/testonly.rs @@ -0,0 +1,147 @@ +#![allow(dead_code)] +use super::*; +use crate::{frame, mux, noise, preface, rpc, Config, GossipConfig}; +use anyhow::Context as _; +use rand::Rng as _; +use std::collections::BTreeMap; +use zksync_concurrency::{ctx, limiter}; +use zksync_consensus_roles::validator; + +/// Connection. +pub(super) struct Conn { + accept: BTreeMap>, + connect: BTreeMap>, +} + +/// Background task of the connection. +pub(super) struct ConnRunner { + mux: mux::Mux, + stream: noise::Stream, +} + +impl ConnRunner { + /// Runs the background task of the connection. 
+ pub(super) async fn run(self, ctx: &ctx::Ctx) -> Result<(), mux::RunError> { + self.mux.run(ctx, self.stream).await + } +} + +fn mux_entry(ctx: &ctx::Ctx) -> (mux::CapabilityId, Arc) { + ( + R::CAPABILITY_ID, + mux::StreamQueue::new(ctx, R::INFLIGHT, limiter::Rate::INF), + ) +} + +/// Establishes an anonymous gossipnet connection to a peer. +pub(super) async fn connect( + ctx: &ctx::Ctx, + peer: &Config, + genesis: validator::GenesisHash, +) -> ctx::Result<(Conn, ConnRunner)> { + assert!(peer.gossip.dynamic_inbound_limit > 0); + let addr = peer + .public_addr + .resolve(ctx) + .await? + .context("peer.public_addr.resolve()")?[0]; + let mut stream = preface::connect(ctx, addr, preface::Endpoint::GossipNet) + .await + .context("preface::connect()")?; + let cfg = GossipConfig { + key: ctx.rng().gen(), + dynamic_inbound_limit: 0, + static_outbound: [].into(), + static_inbound: [].into(), + }; + handshake::outbound(ctx, &cfg, genesis, &mut stream, &peer.gossip.key.public()) + .await + .context("handshake::outbound()")?; + let conn = Conn { + accept: [ + mux_entry::(ctx), + mux_entry::(ctx), + ] + .into(), + connect: [ + mux_entry::(ctx), + mux_entry::(ctx), + ] + .into(), + }; + let mux = mux::Mux { + cfg: Arc::new(rpc::MUX_CONFIG.clone()), + accept: conn.accept.clone(), + connect: conn.connect.clone(), + }; + Ok((conn, ConnRunner { mux, stream })) +} + +impl Conn { + /// Opens a server-side stream. + pub(super) async fn open_server( + &self, + ctx: &ctx::Ctx, + ) -> ctx::OrCanceled> { + Ok(ServerStream( + self.connect + .get(&R::CAPABILITY_ID) + .unwrap() + .open(ctx) + .await?, + std::marker::PhantomData, + )) + } + + /// Opens a client-side stream. + pub(super) async fn open_client( + &self, + ctx: &ctx::Ctx, + ) -> ctx::OrCanceled> { + Ok(ClientStream( + self.accept + .get(&R::CAPABILITY_ID) + .unwrap() + .open(ctx) + .await?, + std::marker::PhantomData, + )) + } +} + +/// Client side stream. 
+pub(super) struct ClientStream(mux::Stream, std::marker::PhantomData); +/// Server side stream. +pub(super) struct ServerStream(mux::Stream, std::marker::PhantomData); + +impl ClientStream { + /// Sends a request. + pub(super) async fn send(&mut self, ctx: &ctx::Ctx, req: &R::Req) -> anyhow::Result<()> { + frame::mux_send_proto(ctx, &mut self.0.write, req).await?; + self.0.write.flush(ctx).await?; + Ok(()) + } + + /// Receives a response. + pub(super) async fn recv(mut self, ctx: &ctx::Ctx) -> anyhow::Result { + Ok(frame::mux_recv_proto(ctx, &mut self.0.read, usize::MAX) + .await? + .0) + } +} + +impl ServerStream { + /// Sends a response. + pub(super) async fn send(mut self, ctx: &ctx::Ctx, resp: &R::Resp) -> anyhow::Result<()> { + frame::mux_send_proto(ctx, &mut self.0.write, resp).await?; + self.0.write.flush(ctx).await?; + Ok(()) + } + + /// Receives a request. + pub(super) async fn recv(&mut self, ctx: &ctx::Ctx) -> anyhow::Result { + Ok(frame::mux_recv_proto(ctx, &mut self.0.read, usize::MAX) + .await? + .0) + } +} diff --git a/node/actors/network/src/gossip/tests/fetch.rs b/node/actors/network/src/gossip/tests/fetch.rs new file mode 100644 index 00000000..e4cba22d --- /dev/null +++ b/node/actors/network/src/gossip/tests/fetch.rs @@ -0,0 +1,313 @@ +//! Unit tests of `get_block` RPC. 
+use crate::{gossip, mux, rpc}; +use assert_matches::assert_matches; +use rand::Rng as _; +use tracing::Instrument as _; +use zksync_concurrency::{ctx, limiter, scope, testonly::abort_on_panic}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::{testonly::new_store, BlockStoreState}; + +#[tokio::test] +async fn test_simple() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 1); + setup.push_blocks(rng, 2); + let mut cfg = crate::testonly::new_configs(rng, &setup, 0)[0].clone(); + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + + scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (_node, runner) = crate::testonly::Instance::new(cfg.clone(), store.clone()); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + + let (conn, runner) = gossip::testonly::connect(ctx, &cfg, setup.genesis.hash()) + .await + .unwrap(); + s.spawn_bg(async { + assert_matches!(runner.run(ctx).await, Err(mux::RunError::Canceled(_))); + Ok(()) + }); + + tracing::info!("Store is empty so requesting a block should return an empty response."); + let mut stream = conn.open_client::(ctx).await.unwrap(); + stream + .send(ctx, &rpc::get_block::Req(setup.blocks[0].number())) + .await + .unwrap(); + let resp = stream.recv(ctx).await.unwrap(); + assert_eq!(resp.0, None); + + tracing::info!("Insert a block."); + store + .queue_block(ctx, setup.blocks[0].clone()) + .await + .unwrap(); + loop { + let mut stream = conn + .open_server::(ctx) + .await + .unwrap(); + let state = stream.recv(ctx).await.unwrap(); + stream.send(ctx, &()).await.unwrap(); + if state.0.contains(setup.blocks[0].number()) { + tracing::info!("peer reported to have a block"); + break; + } + } + 
tracing::info!("fetch that block."); + let mut stream = conn.open_client::(ctx).await.unwrap(); + stream + .send(ctx, &rpc::get_block::Req(setup.blocks[0].number())) + .await + .unwrap(); + let resp = stream.recv(ctx).await.unwrap(); + assert_eq!(resp.0, Some(setup.blocks[0].clone())); + + tracing::info!("Inform the peer that we have {}", setup.blocks[1].number()); + let mut stream = conn + .open_client::(ctx) + .await + .unwrap(); + stream + .send( + ctx, + &rpc::push_block_store_state::Req(BlockStoreState { + first: setup.blocks[1].number(), + last: Some(setup.blocks[1].justification.clone()), + }), + ) + .await + .unwrap(); + stream.recv(ctx).await.unwrap(); + + tracing::info!("Wait for the client to request that block"); + let mut stream = conn.open_server::(ctx).await.unwrap(); + let req = stream.recv(ctx).await.unwrap(); + assert_eq!(req.0, setup.blocks[1].number()); + + tracing::info!("Return the requested block"); + stream + .send(ctx, &rpc::get_block::Resp(Some(setup.blocks[1].clone()))) + .await + .unwrap(); + + tracing::info!("Wait for the client to store that block"); + store + .wait_until_persisted(ctx, setup.blocks[1].number()) + .await + .unwrap(); + + Ok(()) + }) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_concurrent_requests() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 1); + setup.push_blocks(rng, 10); + let mut cfg = crate::testonly::new_configs(rng, &setup, 0)[0].clone(); + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + cfg.max_block_queue_size = setup.blocks.len(); + + scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (_node, runner) = crate::testonly::Instance::new(cfg.clone(), store.clone()); + 
s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + + let mut conns = vec![]; + for _ in 0..4 { + let (conn, runner) = gossip::testonly::connect(ctx, &cfg, setup.genesis.hash()) + .await + .unwrap(); + s.spawn_bg(async { + assert_matches!(runner.run(ctx).await, Err(mux::RunError::Canceled(_))); + Ok(()) + }); + let mut stream = conn + .open_client::(ctx) + .await + .unwrap(); + stream + .send( + ctx, + &rpc::push_block_store_state::Req(BlockStoreState { + first: setup.blocks[0].number(), + last: Some(setup.blocks.last().unwrap().justification.clone()), + }), + ) + .await + .unwrap(); + stream.recv(ctx).await.unwrap(); + conns.push(conn); + } + + // Receive a bunch of concurrent requests on various connections. + let mut streams = vec![]; + for (i, block) in setup.blocks.iter().enumerate() { + let mut stream = conns[i % conns.len()] + .open_server::(ctx) + .await + .unwrap(); + let req = stream.recv(ctx).await.unwrap(); + assert_eq!(req.0, block.number()); + streams.push(stream); + } + + // Respond to the requests. 
+ for (i, stream) in streams.into_iter().enumerate() { + stream + .send(ctx, &rpc::get_block::Resp(Some(setup.blocks[i].clone()))) + .await + .unwrap(); + } + Ok(()) + }) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_bad_responses() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 1); + setup.push_blocks(rng, 2); + let mut cfg = crate::testonly::new_configs(rng, &setup, 0)[0].clone(); + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + + scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (_node, runner) = crate::testonly::Instance::new(cfg.clone(), store.clone()); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + + let state = rpc::push_block_store_state::Req(BlockStoreState { + first: setup.blocks[0].number(), + last: Some(setup.blocks[0].justification.clone()), + }); + + for resp in [ + // Empty response even though we declared to have the block. + None, + // Wrong block. + Some(setup.blocks[1].clone()), + // Malformed block. 
+ { + let mut b = setup.blocks[0].clone(); + b.justification = rng.gen(); + Some(b) + }, + ] { + tracing::info!("bad response = {resp:?}"); + + tracing::info!("Connect to peer"); + let (conn, runner) = gossip::testonly::connect(ctx, &cfg, setup.genesis.hash()) + .await + .unwrap(); + let conn_task = s.spawn_bg(async { Ok(runner.run(ctx).await) }); + + tracing::info!("Inform the peer about the block that we possess"); + let mut stream = conn + .open_client::(ctx) + .await + .unwrap(); + stream.send(ctx, &state).await.unwrap(); + stream.recv(ctx).await.unwrap(); + + tracing::info!("Wait for the client to request that block"); + let mut stream = conn.open_server::(ctx).await.unwrap(); + let req = stream.recv(ctx).await.unwrap(); + assert_eq!(req.0, setup.blocks[0].number()); + + tracing::info!("Return a bad response"); + stream.send(ctx, &rpc::get_block::Resp(resp)).await.unwrap(); + + tracing::info!("Wait for the peer to drop the connection"); + assert_matches!( + conn_task.join(ctx).await.unwrap(), + Err(mux::RunError::Closed) + ); + } + Ok(()) + }) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_retry() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 1); + setup.push_blocks(rng, 1); + let mut cfg = crate::testonly::new_configs(rng, &setup, 0)[0].clone(); + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + + scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (_node, runner) = crate::testonly::Instance::new(cfg.clone(), store.clone()); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + + let state = rpc::push_block_store_state::Req(BlockStoreState { + first: setup.blocks[0].number(), + last: Some(setup.blocks[0].justification.clone()), 
+ }); + + tracing::info!("establish a bunch of connections"); + let mut conns = vec![]; + for _ in 0..4 { + let (conn, runner) = gossip::testonly::connect(ctx, &cfg, setup.genesis.hash()) + .await + .unwrap(); + let task = s.spawn_bg(async { Ok(runner.run(ctx).await) }); + let mut stream = conn + .open_client::(ctx) + .await + .unwrap(); + stream.send(ctx, &state).await.unwrap(); + stream.recv(ctx).await.unwrap(); + conns.push((conn, task)); + } + + for (conn, task) in conns { + tracing::info!("Wait for the client to request a block"); + let mut stream = conn.open_server::(ctx).await.unwrap(); + let req = stream.recv(ctx).await.unwrap(); + assert_eq!(req.0, setup.blocks[0].number()); + + tracing::info!("Return a bad response"); + stream.send(ctx, &rpc::get_block::Resp(None)).await.unwrap(); + + tracing::info!("Wait for the peer to drop the connection"); + assert_matches!(task.join(ctx).await.unwrap(), Err(mux::RunError::Closed)); + } + + Ok(()) + }) + .await + .unwrap(); +} diff --git a/node/actors/network/src/gossip/tests.rs b/node/actors/network/src/gossip/tests/mod.rs similarity index 67% rename from node/actors/network/src/gossip/tests.rs rename to node/actors/network/src/gossip/tests/mod.rs index 40a50875..a8ba0cec 100644 --- a/node/actors/network/src/gossip/tests.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -1,5 +1,6 @@ use super::*; -use crate::{io, metrics, preface, rpc, testonly}; +use crate::{metrics, preface, rpc, testonly}; +use anyhow::Context as _; use assert_matches::assert_matches; use pretty_assertions::assert_eq; use rand::Rng; @@ -7,16 +8,18 @@ use std::{ collections::{HashMap, HashSet}, sync::{atomic::Ordering, Arc}, }; -use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ - ctx, net, oneshot, scope, sync, + ctx, net, scope, sync, testonly::{abort_on_panic, set_timeout}, time, }; -use zksync_consensus_roles::validator::{self, BlockNumber, FinalBlock}; +use zksync_consensus_roles::validator; 
use zksync_consensus_storage::testonly::new_store; +mod fetch; +mod syncing; + #[tokio::test] async fn test_one_connection_per_node() { abort_on_panic(); @@ -311,210 +314,6 @@ async fn test_genesis_mismatch() { .unwrap(); } -const EXCHANGED_STATE_COUNT: usize = 5; -const NETWORK_CONNECTIVITY_CASES: [(usize, usize); 5] = [(2, 1), (3, 2), (5, 3), (10, 4), (10, 7)]; - -/// Tests block syncing with global network synchronization (a next block becoming available -/// to all nodes only after all nodes have received previous `SyncState` updates from peers). -#[test_casing(5, NETWORK_CONNECTIVITY_CASES)] -#[tokio::test(flavor = "multi_thread")] -#[tracing::instrument(level = "trace")] -async fn syncing_blocks(node_count: usize, gossip_peers: usize) { - abort_on_panic(); - let _guard = set_timeout(time::Duration::seconds(5)); - - let ctx = &ctx::test_root(&ctx::AffineClock::new(20.0)); - let rng = &mut ctx.rng(); - let mut setup = validator::testonly::Setup::new(rng, node_count); - setup.push_blocks(rng, EXCHANGED_STATE_COUNT); - let cfgs = testonly::new_configs(rng, &setup, gossip_peers); - scope::run!(ctx, |ctx, s| async { - let mut nodes = vec![]; - for (i, cfg) in cfgs.into_iter().enumerate() { - let (store, runner) = new_store(ctx, &setup.genesis).await; - s.spawn_bg(runner.run(ctx)); - let (node, runner) = testonly::Instance::new(cfg, store); - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); - nodes.push(node); - } - for block in &setup.blocks { - for node in &nodes { - node.net - .gossip - .block_store - .queue_block(ctx, block.clone()) - .await - .context("queue_block()")?; - } - for node in &mut nodes { - wait_for_updates(ctx, node, gossip_peers, block).await?; - } - } - Ok(()) - }) - .await - .unwrap(); -} - -async fn wait_for_updates( - ctx: &ctx::Ctx, - node: &mut testonly::Instance, - peer_count: usize, - block: &FinalBlock, -) -> anyhow::Result<()> { - let mut updates = HashSet::new(); - while updates.len() < peer_count { - let 
io::OutputMessage::SyncBlocks(io::SyncBlocksRequest::UpdatePeerSyncState { - peer, - state, - response, - }) = node.pipe.recv(ctx).await.context("pipe.recv()")? - else { - continue; - }; - if state.last.as_ref() == Some(&block.justification) { - updates.insert(peer); - } - response.send(()).ok(); - } - Ok(()) -} - -/// Tests block syncing in an uncoordinated network, in which new blocks arrive at a schedule. -/// In this case, some nodes may skip emitting initial / intermediate updates to peers, so we -/// only assert that all peers for all nodes emit the final update. -#[test_casing(10, Product(( - NETWORK_CONNECTIVITY_CASES, - [time::Duration::seconds(1), time::Duration::seconds(10)], -)))] -#[tokio::test(flavor = "multi_thread")] -#[tracing::instrument(level = "trace")] -async fn uncoordinated_block_syncing( - (node_count, gossip_peers): (usize, usize), - state_generation_interval: time::Duration, -) { - abort_on_panic(); - let _guard = set_timeout(time::Duration::seconds(5)); - - let ctx = &ctx::test_root(&ctx::AffineClock::new(20.0)); - let rng = &mut ctx.rng(); - let mut setup = validator::testonly::Setup::new(rng, node_count); - setup.push_blocks(rng, EXCHANGED_STATE_COUNT); - scope::run!(ctx, |ctx, s| async { - for (i, cfg) in testonly::new_configs(rng, &setup, gossip_peers) - .into_iter() - .enumerate() - { - let i = i; - let (store, runner) = new_store(ctx, &setup.genesis).await; - s.spawn_bg(runner.run(ctx)); - let (node, runner) = testonly::Instance::new(cfg, store.clone()); - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); - s.spawn(async { - let store = store; - for block in &setup.blocks { - ctx.sleep(state_generation_interval).await?; - store.queue_block(ctx, block.clone()).await.unwrap(); - } - Ok(()) - }); - s.spawn(async { - let mut node = node; - wait_for_updates(ctx, &mut node, gossip_peers, setup.blocks.last().unwrap()).await - }); - } - Ok(()) - }) - .await - .unwrap(); -} - -#[test_casing(5, 
NETWORK_CONNECTIVITY_CASES)] -#[tokio::test] -async fn getting_blocks_from_peers(node_count: usize, gossip_peers: usize) { - abort_on_panic(); - - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let mut setup = validator::testonly::Setup::new(rng, node_count); - setup.push_blocks(rng, 1); - let cfgs = testonly::new_configs(rng, &setup, gossip_peers); - - // All inbound and outbound peers should answer the request. - let expected_successful_responses = (2 * gossip_peers).min(node_count - 1); - - scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.genesis).await; - s.spawn_bg(runner.run(ctx)); - store - .queue_block(ctx, setup.blocks[0].clone()) - .await - .unwrap(); - - let mut nodes: Vec<_> = cfgs - .into_iter() - .enumerate() - .map(|(i, cfg)| { - let (node, runner) = testonly::Instance::new(cfg, store.clone()); - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); - node - }) - .collect(); - - for node in &nodes { - node.wait_for_gossip_connections().await; - tracing::info!("establish connections"); - let mut successful_peer_responses = 0; - for peer in &nodes { - let (response, response_receiver) = oneshot::channel(); - node.pipe.send( - io::SyncBlocksInputMessage::GetBlock { - recipient: peer.net.gossip.cfg.gossip.key.public(), - number: setup.blocks[0].header().number, - response, - } - .into(), - ); - tracing::info!("wait for response"); - if let Ok(block) = response_receiver.recv(ctx).await? 
{ - assert_eq!(block, setup.blocks[0]); - successful_peer_responses += 1; - } - } - assert_eq!(successful_peer_responses, expected_successful_responses); - } - - tracing::info!("stop the last node"); - let last = nodes.pop().unwrap(); - last.terminate(ctx).await?; - - let stopped_node_key = last.net.gossip.cfg.gossip.key.public(); - for node in &nodes { - tracing::info!("wait for disconnection"); - node.wait_for_gossip_disconnect(ctx, &stopped_node_key) - .await - .unwrap(); - - tracing::info!("wait for disconnection"); - // Check that the node cannot access the stopped peer. - let (response, response_receiver) = oneshot::channel(); - node.pipe.send( - io::SyncBlocksInputMessage::GetBlock { - recipient: stopped_node_key.clone(), - number: BlockNumber(1), - response, - } - .into(), - ); - assert!(response_receiver.recv(ctx).await?.is_err()); - } - - Ok(()) - }) - .await - .unwrap(); -} - /// When validator node is restarted, it should immediately override /// the AccountData that is present in the network from the previous run. #[tokio::test] diff --git a/node/actors/network/src/gossip/tests/syncing.rs b/node/actors/network/src/gossip/tests/syncing.rs new file mode 100644 index 00000000..44922ada --- /dev/null +++ b/node/actors/network/src/gossip/tests/syncing.rs @@ -0,0 +1,304 @@ +//! Integration tests of block synchronization. 
+use crate::testonly; +use anyhow::Context as _; +use rand::seq::SliceRandom as _; +use test_casing::{test_casing, Product}; +use tracing::Instrument as _; +use zksync_concurrency::{ + ctx, limiter, scope, + testonly::{abort_on_panic, set_timeout}, + time, +}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::testonly::{new_store, new_store_with_first}; + +const EXCHANGED_STATE_COUNT: usize = 5; +const NETWORK_CONNECTIVITY_CASES: [(usize, usize); 5] = [(2, 1), (3, 2), (5, 3), (10, 4), (10, 7)]; + +/// Tests block syncing with global network synchronization (a next block becoming available +/// on some node only after nodes have received the previous block. +#[test_casing(5, NETWORK_CONNECTIVITY_CASES)] +#[tokio::test(flavor = "multi_thread")] +async fn coordinated_block_syncing(node_count: usize, gossip_peers: usize) { + abort_on_panic(); + let _guard = set_timeout(time::Duration::seconds(20)); + + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, node_count); + setup.push_blocks(rng, EXCHANGED_STATE_COUNT); + let cfgs = testonly::new_configs(rng, &setup, gossip_peers); + scope::run!(ctx, |ctx, s| async { + let mut nodes = vec![]; + for (i, mut cfg) in cfgs.into_iter().enumerate() { + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (node, runner) = testonly::Instance::new(cfg, store); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + nodes.push(node); + } + for block in &setup.blocks { + nodes + .choose(rng) + .unwrap() + .net + .gossip + .block_store + .queue_block(ctx, block.clone()) + .await + .context("queue_block()")?; + for node in &nodes { + node.net + .gossip + .block_store + .wait_until_persisted(ctx, block.number()) + 
.await + .unwrap(); + } + } + Ok(()) + }) + .await + .unwrap(); +} + +/// Tests block syncing in an uncoordinated network, in which new blocks arrive at a schedule. +#[test_casing(10, Product(( + NETWORK_CONNECTIVITY_CASES, + [time::Duration::milliseconds(50), time::Duration::milliseconds(500)], +)))] +#[tokio::test(flavor = "multi_thread")] +async fn uncoordinated_block_syncing( + (node_count, gossip_peers): (usize, usize), + state_generation_interval: time::Duration, +) { + abort_on_panic(); + let _guard = set_timeout(time::Duration::seconds(20)); + + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, node_count); + setup.push_blocks(rng, EXCHANGED_STATE_COUNT); + let cfgs = testonly::new_configs(rng, &setup, gossip_peers); + scope::run!(ctx, |ctx, s| async { + let mut nodes = vec![]; + for (i, mut cfg) in cfgs.into_iter().enumerate() { + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (node, runner) = testonly::Instance::new(cfg, store); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + nodes.push(node); + } + for block in &setup.blocks { + nodes + .choose(rng) + .unwrap() + .net + .gossip + .block_store + .queue_block(ctx, block.clone()) + .await + .context("queue_block()")?; + ctx.sleep(state_generation_interval).await?; + } + let last = setup.blocks.last().unwrap().number(); + for node in &nodes { + node.net + .gossip + .block_store + .wait_until_persisted(ctx, last) + .await + .unwrap(); + } + Ok(()) + }) + .await + .unwrap(); +} + +/// Test concurrently adding new nodes and new blocks to the network. 
+#[tokio::test(flavor = "multi_thread")] +async fn test_switching_on_nodes() { + abort_on_panic(); + let _guard = set_timeout(time::Duration::seconds(20)); + + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 7); + // It is important that all nodes will connect to each other, + // because we spawn the nodes gradually and we want the network + // to be connected at all times. + let cfgs = testonly::new_configs(rng, &setup, setup.keys.len()); + setup.push_blocks(rng, cfgs.len()); + scope::run!(ctx, |ctx, s| async { + let mut nodes = vec![]; + for (i, mut cfg) in cfgs.into_iter().enumerate() { + // Spawn another node. + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (node, runner) = testonly::Instance::new(cfg, store); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + nodes.push(node); + + // Insert a block to storage of a random node. + nodes + .choose(rng) + .unwrap() + .net + .gossip + .block_store + .queue_block(ctx, setup.blocks[i].clone()) + .await + .context("queue_block()")?; + + // Wait for all the nodes to fetch the block. + for node in &nodes { + node.net + .gossip + .block_store + .wait_until_persisted(ctx, setup.blocks[i].number()) + .await + .unwrap(); + } + } + Ok(()) + }) + .await + .unwrap(); +} + +/// Test concurrently removing nodes and adding new blocks to the network. 
+#[tokio::test(flavor = "multi_thread")] +async fn test_switching_off_nodes() { + abort_on_panic(); + let _guard = set_timeout(time::Duration::seconds(20)); + + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 7); + // It is important that all nodes will connect to each other, + // because we spawn the nodes gradually and we want the network + // to be connected at all times. + let cfgs = testonly::new_configs(rng, &setup, setup.keys.len()); + setup.push_blocks(rng, cfgs.len()); + scope::run!(ctx, |ctx, s| async { + let mut nodes = vec![]; + for (i, mut cfg) in cfgs.into_iter().enumerate() { + // Spawn another node. + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (node, runner) = testonly::Instance::new(cfg, store); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + nodes.push(node); + } + nodes.shuffle(rng); + + for i in 0..nodes.len() { + // Insert a block to storage of a random node. + nodes[i..] + .choose(rng) + .unwrap() + .net + .gossip + .block_store + .queue_block(ctx, setup.blocks[i].clone()) + .await + .context("queue_block()")?; + + // Wait for all the remaining nodes to fetch the block. + for node in &nodes[i..] { + node.net + .gossip + .block_store + .wait_until_persisted(ctx, setup.blocks[i].number()) + .await + .unwrap(); + } + + // Terminate a random node. + nodes[i].terminate(ctx).await.unwrap(); + } + Ok(()) + }) + .await + .unwrap(); +} + +/// Test checking that nodes with different first block can synchronize. 
+#[tokio::test(flavor = "multi_thread")] +async fn test_different_first_block() { + abort_on_panic(); + let _guard = set_timeout(time::Duration::seconds(20)); + + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut setup = validator::testonly::Setup::new(rng, 4); + setup.push_blocks(rng, 10); + // It is important that all nodes will connect to each other, + // because we spawn the nodes gradually and we want the network + // to be connected at all times. + let cfgs = testonly::new_configs(rng, &setup, setup.keys.len()); + scope::run!(ctx, |ctx, s| async { + let mut nodes = vec![]; + for (i, mut cfg) in cfgs.into_iter().enumerate() { + // Spawn another node. + cfg.rpc.push_block_store_state_rate = limiter::Rate::INF; + cfg.rpc.get_block_rate = limiter::Rate::INF; + cfg.rpc.get_block_timeout = None; + cfg.validator_key = None; + // Choose the first block for the node at random. + let first = setup.blocks.choose(rng).unwrap().number(); + let (store, runner) = new_store_with_first(ctx, &setup.genesis, first).await; + s.spawn_bg(runner.run(ctx)); + let (node, runner) = testonly::Instance::new(cfg, store); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + nodes.push(node); + } + nodes.shuffle(rng); + + for block in &setup.blocks { + // Find nodes interested in the next block. + let interested_nodes: Vec<_> = nodes + .iter() + .filter(|n| n.net.gossip.block_store.queued().first <= block.number()) + .collect(); + // Store this block to one of them. + if let Some(node) = interested_nodes.choose(rng) { + node.net + .gossip + .block_store + .queue_block(ctx, block.clone()) + .await + .unwrap(); + } + // Wait until all remaining nodes get the new block. 
+ for node in interested_nodes { + node.net + .gossip + .block_store + .wait_until_persisted(ctx, block.number()) + .await + .unwrap(); + } + } + Ok(()) + }) + .await + .unwrap(); +} diff --git a/node/actors/network/src/io.rs b/node/actors/network/src/io.rs index 9747c8ed..9a7412f9 100644 --- a/node/actors/network/src/io.rs +++ b/node/actors/network/src/io.rs @@ -1,15 +1,12 @@ #![allow(missing_docs)] use zksync_concurrency::oneshot; -use zksync_consensus_roles::{node, validator}; -use zksync_consensus_storage::BlockStoreState; +use zksync_consensus_roles::validator; /// All the messages that other actors can send to the Network actor. #[derive(Debug)] pub enum InputMessage { /// Message types from the Consensus actor. Consensus(ConsensusInputMessage), - /// Message types from the Sync Blocks actor. - SyncBlocks(SyncBlocksInputMessage), } /// Message types from the Consensus actor. @@ -25,23 +22,6 @@ impl From for InputMessage { } } -/// Message types from the Sync Blocks actor. -#[derive(Debug)] -pub enum SyncBlocksInputMessage { - /// Request to get a block from a specific peer. - GetBlock { - recipient: node::PublicKey, - number: validator::BlockNumber, - response: oneshot::Sender>, - }, -} - -impl From for InputMessage { - fn from(message: SyncBlocksInputMessage) -> Self { - Self::SyncBlocks(message) - } -} - /// Consensus message received from the network. #[derive(Debug)] pub struct ConsensusReq { @@ -53,45 +33,11 @@ pub struct ConsensusReq { pub ack: oneshot::Sender<()>, } -/// Error returned in response to [`GetBlock`] call. -/// -/// Note that these errors don't include network-level errors, only app-level ones. -#[derive(Debug, thiserror::Error)] -pub enum GetBlockError { - /// Transient error: the node doesn't have the requested L2 block. 
- #[error("node doesn't have the requested L2 block")] - NotAvailable, - #[error(transparent)] - Internal(#[from] anyhow::Error), -} - -#[derive(Debug)] -pub enum SyncBlocksRequest { - /// Notifies about an update in peer's `SyncState`. - UpdatePeerSyncState { - /// Peer that has reported the update. - peer: node::PublicKey, - /// Updated peer syncing state. - state: BlockStoreState, - /// Acknowledgement response returned by the block syncing actor. - // TODO: return an error in case of invalid `SyncState`? - response: oneshot::Sender<()>, - }, -} - /// All the messages that the Network actor sends to other actors. #[derive(Debug)] pub enum OutputMessage { /// Message to the Consensus actor. Consensus(ConsensusReq), - /// Message to the block syncing actor. - SyncBlocks(SyncBlocksRequest), -} - -impl From for OutputMessage { - fn from(request: SyncBlocksRequest) -> Self { - Self::SyncBlocks(request) - } } #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index ed154333..4de3bc87 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -2,7 +2,7 @@ use anyhow::Context as _; use std::sync::Arc; use tracing::Instrument as _; -use zksync_concurrency::{ctx, ctx::channel, limiter, scope, time}; +use zksync_concurrency::{ctx, ctx::channel, limiter, scope}; use zksync_consensus_storage::BlockStore; use zksync_consensus_utils::pipe::ActorPipe; @@ -71,12 +71,9 @@ impl Network { /// Handles a dispatcher message. async fn handle_message( &self, - ctx: &ctx::Ctx, + _ctx: &ctx::Ctx, message: io::InputMessage, ) -> anyhow::Result<()> { - /// Timeout for a GetBlock RPC. 
- const GET_BLOCK_TIMEOUT: time::Duration = time::Duration::seconds(10); - match message { io::InputMessage::Consensus(message) => { self.consensus @@ -85,18 +82,6 @@ impl Network { .msg_pool .send(Arc::new(message)); } - io::InputMessage::SyncBlocks(io::SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) => { - let ctx = &ctx.with_timeout(GET_BLOCK_TIMEOUT); - let _ = response.send(match self.gossip.get_block(ctx, &recipient, number).await { - Ok(Some(block)) => Ok(block), - Ok(None) => Err(io::GetBlockError::NotAvailable), - Err(err) => Err(io::GetBlockError::Internal(err)), - }); - } } Ok(()) } @@ -128,6 +113,12 @@ impl Runner { Ok(()) }); + // Fetch missing blocks in the background. + s.spawn(async { + self.net.gossip.run_block_fetcher(ctx).await; + Ok(()) + }); + // Maintain static gossip connections. for (peer, addr) in &self.net.gossip.cfg.gossip.static_outbound { s.spawn::<()>(async { diff --git a/node/actors/network/src/mux/reusable_stream.rs b/node/actors/network/src/mux/reusable_stream.rs index 381cf561..83d0a943 100644 --- a/node/actors/network/src/mux/reusable_stream.rs +++ b/node/actors/network/src/mux/reusable_stream.rs @@ -4,7 +4,7 @@ use super::{ }; use crate::noise::bytes; use std::sync::Arc; -use zksync_concurrency::{ctx, ctx::channel, oneshot, scope, sync}; +use zksync_concurrency::{ctx, ctx::channel, limiter, oneshot, scope, sync}; /// Read frame allocation permit. #[derive(Debug)] @@ -66,18 +66,20 @@ impl ReservedStream { /// `queue.pop()` before the OPEN message is sent to the peer. pub(crate) struct StreamQueue { pub(super) max_streams: u32, - send: channel::UnboundedSender, - recv: sync::Mutex>, + limiter: limiter::Limiter, + send: channel::Sender, + recv: sync::Mutex>, } impl StreamQueue { /// Constructs a new StreamQueue with the specified number of reusable streams. /// During multiplexer handshake, peers exchange information about /// how many reusable streams they support per capability. 
- pub(crate) fn new(max_streams: u32) -> Arc { - let (send, recv) = channel::unbounded(); + pub(crate) fn new(ctx: &ctx::Ctx, max_streams: u32, rate: limiter::Rate) -> Arc { + let (send, recv) = channel::bounded(1); Arc::new(Self { max_streams, + limiter: limiter::Limiter::new(ctx, rate), send, recv: sync::Mutex::new(recv), }) @@ -91,6 +93,7 @@ impl StreamQueue { } /// Opens a transient stream from the queue. + #[allow(dead_code)] pub(crate) async fn open(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled { loop { // It may happen that the popped stream has been immediately disconnected @@ -106,7 +109,7 @@ impl StreamQueue { async fn push(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled { loop { let (send, recv) = oneshot::channel(); - self.send.send(ReservedStream(send)); + self.send.send(ctx, ReservedStream(send)).await?; if let Ok(reservation) = recv.recv_or_disconnected(ctx).await? { return Ok(reservation); } @@ -269,6 +272,7 @@ impl ReusableStream { let mut write = write_receiver.wait(ctx).await?; write.send_close(ctx).await?; + let _open_permit = self.stream_queue.limiter.acquire(ctx, 1).await?; let (read, reservation) = match write.stream_kind { StreamKind::ACCEPT => { let read = recv_open_task.join(ctx).await?; diff --git a/node/actors/network/src/mux/tests/mod.rs b/node/actors/network/src/mux/tests/mod.rs index 3106c047..2ae1b245 100644 --- a/node/actors/network/src/mux/tests/mod.rs +++ b/node/actors/network/src/mux/tests/mod.rs @@ -8,7 +8,7 @@ use std::{ Arc, }, }; -use zksync_concurrency::{ctx, scope, testonly::abort_on_panic}; +use zksync_concurrency::{ctx, limiter, scope, testonly::abort_on_panic}; mod proto; @@ -32,6 +32,7 @@ fn test_masks() { #[test] fn test_mux_verify() { + let ctx = &ctx::test_root(&ctx::RealClock); let cfg = Arc::new(mux::Config { read_buffer_size: 1000, read_frame_size: 100, @@ -47,8 +48,8 @@ fn test_mux_verify() { .is_ok()); let mut queues = BTreeMap::new(); - queues.insert(0, mux::StreamQueue::new(u32::MAX)); - queues.insert(1, 
mux::StreamQueue::new(u32::MAX)); + queues.insert(0, mux::StreamQueue::new(ctx, u32::MAX, limiter::Rate::INF)); + queues.insert(1, mux::StreamQueue::new(ctx, u32::MAX, limiter::Rate::INF)); // Total streams overflow: assert!(mux::Mux { cfg: cfg.clone(), @@ -208,10 +209,20 @@ fn mux_with_noise() { write_frame_size: 150, }), accept: (0..caps) - .map(|c| (c, mux::StreamQueue::new(rng.gen_range(1..5)))) + .map(|c| { + ( + c, + mux::StreamQueue::new(ctx, rng.gen_range(1..5), limiter::Rate::INF), + ) + }) .collect(), connect: (0..caps) - .map(|c| (c, mux::StreamQueue::new(rng.gen_range(1..5)))) + .map(|c| { + ( + c, + mux::StreamQueue::new(ctx, rng.gen_range(1..5), limiter::Rate::INF), + ) + }) .collect(), }; let mux2 = mux::Mux { @@ -222,10 +233,20 @@ fn mux_with_noise() { write_frame_size: 79, }), accept: (0..caps) - .map(|c| (c, mux::StreamQueue::new(rng.gen_range(1..5)))) + .map(|c| { + ( + c, + mux::StreamQueue::new(ctx, rng.gen_range(1..5), limiter::Rate::INF), + ) + }) .collect(), connect: (0..caps) - .map(|c| (c, mux::StreamQueue::new(rng.gen_range(1..5)))) + .map(|c| { + ( + c, + mux::StreamQueue::new(ctx, rng.gen_range(1..5), limiter::Rate::INF), + ) + }) .collect(), }; @@ -303,7 +324,7 @@ async fn test_transport_closed() { accept: BTreeMap::default(), connect: BTreeMap::default(), }; - let q = mux::StreamQueue::new(1); + let q = mux::StreamQueue::new(ctx, 1, limiter::Rate::INF); mux.connect.insert(cap, q.clone()); s.spawn_bg(async { expected(mux.run(ctx, s2).await).context("[connect] mux.run()") @@ -316,7 +337,7 @@ async fn test_transport_closed() { accept: BTreeMap::default(), connect: BTreeMap::default(), }; - let q = mux::StreamQueue::new(1); + let q = mux::StreamQueue::new(ctx, 1, limiter::Rate::INF); mux.accept.insert(cap, q.clone()); s.spawn_bg(async { expected(mux.run(ctx, s1).await).context("[accept] mux.run()") diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs index 2153189e..2780e005 100644 --- 
a/node/actors/network/src/rpc/mod.rs +++ b/node/actors/network/src/rpc/mod.rs @@ -32,7 +32,8 @@ pub(crate) mod testonly; #[cfg(test)] mod tests; -const MUX_CONFIG: mux::Config = mux::Config { +/// Multiplexer configuration for the RPC services. +pub(crate) const MUX_CONFIG: mux::Config = mux::Config { read_buffer_size: 160 * zksync_protobuf::kB as u64, read_frame_size: 16 * zksync_protobuf::kB as u64, read_frame_count: 100, @@ -72,13 +73,12 @@ pub(crate) trait Rpc: Sync + Send + 'static { /// blindly decide which server to call without knowing their real capacity. /// TODO(gprusak): to actually pass around the permit, we should use an OwnedPermit /// instead. -pub(crate) struct ReservedCall<'a, R: Rpc> { +pub(crate) struct ReservedCall { stream: mux::ReservedStream, - permit: limiter::Permit<'a>, _rpc: std::marker::PhantomData, } -impl<'a, R: Rpc> ReservedCall<'a, R> { +impl ReservedCall { /// Performs the call. pub(crate) async fn call( self, @@ -88,7 +88,6 @@ impl<'a, R: Rpc> ReservedCall<'a, R> { ) -> anyhow::Result { let send_time = ctx.now(); let mut stream = self.stream.open(ctx).await??; - drop(self.permit); let res = async { let metric_labels = CallType::Client.to_labels::(req); let _guard = RPC_METRICS.inflight[&metric_labels].inc_guard(1); @@ -113,7 +112,6 @@ impl<'a, R: Rpc> ReservedCall<'a, R> { /// RPC client used to issue the calls to the server. pub(crate) struct Client { - limiter: limiter::Limiter, queue: Arc, _rpc: std::marker::PhantomData, } @@ -124,24 +122,18 @@ impl Client { // so perhaps they should be constructed by `Service::add_client` instead? pub(crate) fn new(ctx: &ctx::Ctx, rate: limiter::Rate) -> Self { Client { - limiter: limiter::Limiter::new(ctx, rate), - queue: mux::StreamQueue::new(R::INFLIGHT), + queue: mux::StreamQueue::new(ctx, R::INFLIGHT, rate), _rpc: std::marker::PhantomData, } } /// Reserves an RPC. 
- pub(crate) async fn reserve<'a>( - &'a self, - ctx: &'a ctx::Ctx, - ) -> ctx::OrCanceled> { + pub(crate) async fn reserve(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled> { let reserve_time = ctx.now(); - let permit = self.limiter.acquire(ctx, 1).await?; let stream = self.queue.reserve(ctx).await?; RPC_METRICS.call_reserve_latency[&R::METHOD].observe_latency(ctx.now() - reserve_time); Ok(ReservedCall { stream, - permit, _rpc: std::marker::PhantomData, }) } @@ -175,7 +167,6 @@ pub(crate) trait Handler: Sync + Send { struct Server> { handler: H, queue: Arc, - rate: limiter::Rate, _rpc: std::marker::PhantomData, } @@ -189,55 +180,51 @@ impl> ServerTrait for Server { /// Serves the incoming RPCs, respecting the rate limit and /// max inflight limit. async fn serve(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled<()> { - let limiter = limiter::Limiter::new(ctx, self.rate); scope::run!(ctx, |ctx, s| async { - for _ in 0..R::INFLIGHT { + loop { + let stream = self.queue.reserve(ctx).await?; s.spawn::<()>(async { - loop { - let permit = limiter.acquire(ctx, 1).await?; - let mut stream = self.queue.open(ctx).await?; - drop(permit); - let res = async { - let recv_time = ctx.now(); - let (req, msg_size) = frame::mux_recv_proto::( - ctx, - &mut stream.read, - self.handler.max_req_size(), - ) - .await?; + let res = async { + let mut stream = stream.open(ctx).await??; + let recv_time = ctx.now(); + let (req, msg_size) = frame::mux_recv_proto::( + ctx, + &mut stream.read, + self.handler.max_req_size(), + ) + .await?; - let size_labels = CallType::ReqRecv.to_labels::(&req); - let resp_size_labels = CallType::RespSent.to_labels::(&req); - RPC_METRICS.message_size[&size_labels].observe(msg_size); - let inflight_labels = CallType::Server.to_labels::(&req); - let _guard = RPC_METRICS.inflight[&inflight_labels].inc_guard(1); - let mut server_process_labels = - CallLatencyType::ServerProcess.to_labels::(&req, &Ok(())); - let mut recv_send_labels = - 
CallLatencyType::ServerRecvSend.to_labels::(&req, &Ok(())); + let size_labels = CallType::ReqRecv.to_labels::(&req); + let resp_size_labels = CallType::RespSent.to_labels::(&req); + RPC_METRICS.message_size[&size_labels].observe(msg_size); + let inflight_labels = CallType::Server.to_labels::(&req); + let _guard = RPC_METRICS.inflight[&inflight_labels].inc_guard(1); + let mut server_process_labels = + CallLatencyType::ServerProcess.to_labels::(&req, &Ok(())); + let mut recv_send_labels = + CallLatencyType::ServerRecvSend.to_labels::(&req, &Ok(())); - let process_time = ctx.now(); - let res = self.handler.handle(ctx, req).await.context(R::METHOD); - server_process_labels.set_result(&res); - RPC_METRICS.latency[&server_process_labels] - .observe_latency(ctx.now() - process_time); + let process_time = ctx.now(); + let res = self.handler.handle(ctx, req).await.context(R::METHOD); + server_process_labels.set_result(&res); + RPC_METRICS.latency[&server_process_labels] + .observe_latency(ctx.now() - process_time); - let res = frame::mux_send_proto(ctx, &mut stream.write, &res?).await; - recv_send_labels.set_result(&res); - RPC_METRICS.latency[&recv_send_labels] - .observe_latency(ctx.now() - recv_time); - let msg_size = res?; - RPC_METRICS.message_size[&resp_size_labels].observe(msg_size); - anyhow::Ok(()) - } - .await; - if let Err(err) = res { - tracing::info!("{err:#}"); - } + let res = frame::mux_send_proto(ctx, &mut stream.write, &res?).await; + recv_send_labels.set_result(&res); + RPC_METRICS.latency[&recv_send_labels] + .observe_latency(ctx.now() - recv_time); + let msg_size = res?; + RPC_METRICS.message_size[&resp_size_labels].observe(msg_size); + anyhow::Ok(()) } + .await; + if let Err(err) = res { + tracing::info!("{err:#}"); + } + Ok(()) }); } - Ok(()) }) .await } @@ -282,10 +269,11 @@ impl<'a> Service<'a> { /// Adds a server to the RPC service. 
pub(crate) fn add_server( mut self, + ctx: &ctx::Ctx, handler: impl Handler + 'a, rate: limiter::Rate, ) -> Self { - let queue = mux::StreamQueue::new(R::INFLIGHT); + let queue = mux::StreamQueue::new(ctx, R::INFLIGHT, rate); if self .mux .connect @@ -300,7 +288,6 @@ impl<'a> Service<'a> { self.servers.push(Box::new(Server { handler, queue, - rate, _rpc: std::marker::PhantomData, })); self diff --git a/node/actors/network/src/rpc/tests.rs b/node/actors/network/src/rpc/tests.rs index 3770320c..b3490406 100644 --- a/node/actors/network/src/rpc/tests.rs +++ b/node/actors/network/src/rpc/tests.rs @@ -50,7 +50,7 @@ async fn test_ping() { s.spawn_bg(async { expected( Service::new() - .add_server(ping::Server, ping::RATE) + .add_server(ctx, ping::Server, ping::RATE) .run(ctx, s1) .await, ) @@ -64,12 +64,10 @@ async fn test_ping() { let resp = client.call(ctx, &req, kB).await?; assert_eq!(req.0, resp.0); } - let now = ctx.now(); - clock.set_advance_on_sleep(); + clock.advance(ping::RATE.refresh); let req = ping::Req(ctx.rng().gen()); let resp = client.call(ctx, &req, kB).await?; assert_eq!(req.0, resp.0); - assert!(ctx.now() >= now + ping::RATE.refresh); Ok(()) }) .await @@ -120,6 +118,7 @@ async fn test_ping_loop() { expected( Service::new() .add_server( + ctx, server, limiter::Rate { burst: 1, @@ -185,7 +184,7 @@ async fn test_inflight() { s.spawn_bg(async { expected( Service::new() - .add_server(ExampleServer, RATE) + .add_server(ctx, ExampleServer, RATE) .run(ctx, s1) .await, ) diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index ef6bdcbf..8ae85f37 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -95,6 +95,7 @@ pub fn new_configs( max_block_size: usize::MAX, tcp_accept_rate: limiter::Rate::INF, rpc: RpcConfig::default(), + max_block_queue_size: 10, } }); let mut cfgs: Vec<_> = configs.collect(); @@ -131,6 +132,7 @@ pub fn new_fullnode(rng: &mut impl Rng, peer: &Config) -> Config { 
max_block_size: usize::MAX, tcp_accept_rate: limiter::Rate::INF, rpc: RpcConfig::default(), + max_block_queue_size: 10, } } diff --git a/node/actors/sync_blocks/Cargo.toml b/node/actors/sync_blocks/Cargo.toml deleted file mode 100644 index b478460d..00000000 --- a/node/actors/sync_blocks/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "zksync_consensus_sync_blocks" -version = "0.1.0" -edition.workspace = true -authors.workspace = true -homepage.workspace = true -license.workspace = true - -[dependencies] -zksync_concurrency.workspace = true -zksync_consensus_network.workspace = true -zksync_consensus_roles.workspace = true -zksync_consensus_storage.workspace = true -zksync_consensus_utils.workspace = true - -anyhow.workspace = true -rand.workspace = true -thiserror.workspace = true -tracing.workspace = true - -[dev-dependencies] -assert_matches.workspace = true -async-trait.workspace = true -test-casing.workspace = true -tokio.workspace = true - -[lints] -workspace = true diff --git a/node/actors/sync_blocks/src/config.rs b/node/actors/sync_blocks/src/config.rs deleted file mode 100644 index a3c9c88a..00000000 --- a/node/actors/sync_blocks/src/config.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! Configuration for the `SyncBlocks` actor. -use zksync_concurrency::time; - -/// Configuration for the `SyncBlocks` actor. -#[derive(Debug)] -pub struct Config { - /// Maximum number of blocks to attempt to get concurrently from all peers in total. - pub(crate) max_concurrent_blocks: usize, - /// Maximum number of blocks to attempt to get concurrently from any single peer. - pub(crate) max_concurrent_blocks_per_peer: usize, - /// Interval between re-checking peers to get a specific block if no peers currently should have - /// the block. - pub(crate) sleep_interval_for_get_block: time::Duration, -} - -impl Default for Config { - fn default() -> Self { - Self::new() - } -} - -impl Config { - /// Creates a new configuration with the provided mandatory params. 
- pub fn new() -> Self { - Self { - max_concurrent_blocks: 20, - max_concurrent_blocks_per_peer: 5, - sleep_interval_for_get_block: time::Duration::seconds(10), - } - } - - /// Sets the maximum number of blocks to attempt to get concurrently. - pub fn with_max_concurrent_blocks(mut self, blocks: usize) -> anyhow::Result { - anyhow::ensure!(blocks > 0, "Number of blocks must be positive"); - self.max_concurrent_blocks = blocks; - Ok(self) - } - - /// Maximum number of blocks to attempt to get concurrently from any single peer. - pub fn with_max_concurrent_blocks_per_peer(mut self, blocks: usize) -> anyhow::Result { - anyhow::ensure!(blocks > 0, "Number of blocks must be positive"); - self.max_concurrent_blocks_per_peer = blocks; - Ok(self) - } - - /// Sets the interval between re-checking peers to get a specific block if no peers currently - /// should have the block. - pub fn with_sleep_interval_for_get_block( - mut self, - interval: time::Duration, - ) -> anyhow::Result { - anyhow::ensure!(interval.is_positive(), "Sleep interval must be positive"); - self.sleep_interval_for_get_block = interval; - Ok(self) - } -} diff --git a/node/actors/sync_blocks/src/io.rs b/node/actors/sync_blocks/src/io.rs deleted file mode 100644 index cb4a9225..00000000 --- a/node/actors/sync_blocks/src/io.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Input and output messages for the [`SyncBlocks`](crate::SyncBlocks) actor. - -use zksync_consensus_network::io::{SyncBlocksInputMessage, SyncBlocksRequest}; - -/// All the messages that other actors can send to the `SyncBlocks` actor. -#[derive(Debug)] -pub enum InputMessage { - /// Message types from the Network actor. - Network(SyncBlocksRequest), -} - -impl From for InputMessage { - fn from(request: SyncBlocksRequest) -> Self { - Self::Network(request) - } -} - -/// Messages produced by the `SyncBlocks` actor. -#[derive(Debug)] -pub enum OutputMessage { - /// Message to the Network actor. 
- Network(SyncBlocksInputMessage), -} - -impl From for OutputMessage { - fn from(message: SyncBlocksInputMessage) -> Self { - Self::Network(message) - } -} diff --git a/node/actors/sync_blocks/src/lib.rs b/node/actors/sync_blocks/src/lib.rs deleted file mode 100644 index e46e340a..00000000 --- a/node/actors/sync_blocks/src/lib.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! # Sync Blocks Actor -//! -//! This crate contains an actor implementing block syncing among nodes, which is tied to the gossip -//! network RPCs. -use crate::io::{InputMessage, OutputMessage}; -use std::sync::Arc; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_network::io::SyncBlocksRequest; -use zksync_consensus_storage::BlockStore; -use zksync_consensus_utils::pipe::ActorPipe; - -mod config; -pub mod io; -mod peers; -#[cfg(test)] -mod tests; - -pub use crate::config::Config; -use crate::peers::PeerStates; - -impl Config { - /// Runs the sync_blocks actor. - pub async fn run( - self, - ctx: &ctx::Ctx, - mut pipe: ActorPipe, - storage: Arc, - ) -> anyhow::Result<()> { - let peer_states = PeerStates::new(self, storage.clone(), pipe.send); - let result: ctx::Result<()> = scope::run!(ctx, |ctx, s| async { - s.spawn_bg(async { peer_states.run_block_fetcher(ctx).await }); - loop { - match pipe.recv.recv(ctx).await? { - InputMessage::Network(SyncBlocksRequest::UpdatePeerSyncState { - peer, - state, - response, - }) => { - let res = peer_states.update(&peer, state); - if let Err(err) = res { - tracing::info!(%err, ?peer, "peer_states.update()"); - } - response.send(()).ok(); - } - } - } - }) - .await; - - // Since we clearly type cancellation errors, it's easier propagate them up to this entry point, - // rather than catching in the constituent tasks. 
- result.or_else(|err| match err { - ctx::Error::Canceled(_) => Ok(()), // Cancellation is not propagated as an error - ctx::Error::Internal(err) => Err(err), - }) - } -} diff --git a/node/actors/sync_blocks/src/peers/events.rs b/node/actors/sync_blocks/src/peers/events.rs deleted file mode 100644 index 36c00bed..00000000 --- a/node/actors/sync_blocks/src/peers/events.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Events emitted by `PeerStates` actor. Useful for testing. - -use zksync_consensus_roles::{node, validator::BlockNumber}; - -/// Events emitted by `PeerStates` actor. Only used for tests so far. -#[derive(Debug)] -#[allow(dead_code)] // Variant fields are only read in tests -pub(super) enum PeerStateEvent { - /// Node has successfully downloaded the specified block. - GotBlock(BlockNumber), - /// Received an invalid block from the peer. - RpcFailed { - peer_key: node::PublicKey, - block_number: BlockNumber, - }, - /// Peer was disconnected (i.e., it has dropped a request). - PeerDropped(node::PublicKey), -} diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs deleted file mode 100644 index 6c838a1c..00000000 --- a/node/actors/sync_blocks/src/peers/mod.rs +++ /dev/null @@ -1,224 +0,0 @@ -//! Peer states tracked by the `SyncBlocks` actor. -#![allow(unused)] -use self::events::PeerStateEvent; -use crate::{io, Config}; -use anyhow::Context as _; -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; -use zksync_concurrency::{ - ctx::{self, channel}, - oneshot, scope, sync, -}; -use zksync_consensus_network::io::SyncBlocksInputMessage; -use zksync_consensus_roles::{ - node, validator, - validator::{BlockNumber, FinalBlock}, -}; -use zksync_consensus_storage::{BlockStore, BlockStoreState}; - -mod events; -#[cfg(test)] -mod tests; - -#[derive(Debug)] -struct PeerState { - state: BlockStoreState, - get_block_semaphore: Arc, -} - -/// Handle for [`PeerStates`] allowing to send updates to it. 
-#[derive(Debug)] -pub(crate) struct PeerStates { - config: Config, - storage: Arc, - message_sender: channel::UnboundedSender, - - peers: Mutex>, - highest_peer_block: sync::watch::Sender, - events_sender: Option>, -} - -impl PeerStates { - fn genesis(&self) -> &validator::Genesis { - self.storage.genesis() - } - - /// Creates a new instance together with a handle. - pub(crate) fn new( - config: Config, - storage: Arc, - message_sender: channel::UnboundedSender, - ) -> Self { - Self { - config, - storage, - message_sender, - - peers: Mutex::default(), - highest_peer_block: sync::watch::channel(BlockNumber(0)).0, - events_sender: None, - } - } - - /// Updates the known `BlockStore` state of the given peer. - /// This information is used to decide from which peer to fetch - /// a given block. - pub(crate) fn update( - &self, - peer: &node::PublicKey, - state: BlockStoreState, - ) -> anyhow::Result<()> { - use std::collections::hash_map::Entry; - state.verify(self.genesis()).context("state.verify()")?; - let mut peers = self.peers.lock().unwrap(); - match peers.entry(peer.clone()) { - Entry::Occupied(mut e) => e.get_mut().state = state.clone(), - Entry::Vacant(e) => { - let permits = self.config.max_concurrent_blocks_per_peer; - e.insert(PeerState { - state: state.clone(), - get_block_semaphore: Arc::new(sync::Semaphore::new(permits)), - }); - } - } - if let Some(last) = &state.last { - self.highest_peer_block - .send_if_modified(|highest_peer_block| { - if *highest_peer_block >= last.header().number { - return false; - } - *highest_peer_block = last.header().number; - true - }); - } - Ok(()) - } - - /// Task fetching blocks from peers which are not present in storage. 
- pub(crate) async fn run_block_fetcher(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { - let sem = sync::Semaphore::new(self.config.max_concurrent_blocks); - scope::run!(ctx, |ctx, s| async { - let mut next = self.storage.queued().next(); - let mut highest_peer_block = self.highest_peer_block.subscribe(); - loop { - sync::wait_for(ctx, &mut highest_peer_block, |highest_peer_block| { - highest_peer_block >= &next - }) - .await?; - let permit = sync::acquire(ctx, &sem).await?; - let block_number = ctx::NoCopy(next); - next = next.next(); - s.spawn(async { - let _permit = permit; - self.fetch_block(ctx, block_number.into()).await - }); - } - }) - .await - } - - /// Fetches the block from peers and puts it to storage. - /// Early exits if the block appeared in storage from other source. - async fn fetch_block(&self, ctx: &ctx::Ctx, block_number: BlockNumber) -> ctx::Result<()> { - let _ = scope::run!(ctx, |ctx, s| async { - s.spawn_bg(async { - let block = self.fetch_block_from_peers(ctx, block_number).await?; - self.storage.queue_block(ctx, block).await - }); - // Cancel fetching as soon as block is queued for storage. - self.storage.wait_until_queued(ctx, block_number).await?; - Ok(()) - }) - .await; - self.storage.wait_until_persisted(ctx, block_number).await?; - Ok(()) - } - - /// Fetches the block from peers. 
- async fn fetch_block_from_peers( - &self, - ctx: &ctx::Ctx, - number: BlockNumber, - ) -> ctx::OrCanceled { - while ctx.is_active() { - let Some(peer) = self.select_peer(number) else { - ctx.sleep(self.config.sleep_interval_for_get_block).await?; - continue; - }; - let res = self.fetch_block_from_peer(ctx, &peer, number).await; - match res { - Ok(block) => { - if let Some(send) = &self.events_sender { - send.send(PeerStateEvent::GotBlock(number)); - } - return Ok(block); - } - Err(ctx::Error::Canceled(_)) => { - tracing::info!(%number, ?peer, "get_block() call canceled"); - } - Err(err) => { - tracing::info!(%err, %number, ?peer, "get_block() failed"); - if let Some(send) = &self.events_sender { - send.send(PeerStateEvent::RpcFailed { - peer_key: peer.clone(), - block_number: number, - }); - } - self.drop_peer(&peer); - } - } - } - Err(ctx::Canceled) - } - - /// Fetches a block from the specified peer. - async fn fetch_block_from_peer( - &self, - ctx: &ctx::Ctx, - peer: &node::PublicKey, - number: BlockNumber, - ) -> ctx::Result { - let (response, response_receiver) = oneshot::channel(); - let message = SyncBlocksInputMessage::GetBlock { - recipient: peer.clone(), - number, - response, - }; - self.message_sender.send(message.into()); - let block = response_receiver - .recv_or_disconnected(ctx) - .await? - .context("no response")? - .context("RPC error")?; - if block.header().number != number { - return Err(anyhow::anyhow!( - "block does not have requested number (requested: {number}, got: {})", - block.header().number - ) - .into()); - } - block.verify(self.genesis()).context("block.validate()")?; - Ok(block) - } - - fn select_peer(&self, block_number: BlockNumber) -> Option { - let peers = self.peers.lock().unwrap(); - peers - .iter() - .find(|(_, s)| s.state.contains(block_number)) - .map(|x| x.0.clone()) - } - - /// Drops peer state. 
- fn drop_peer(&self, peer: &node::PublicKey) { - if self.peers.lock().unwrap().remove(peer).is_none() { - return; - } - tracing::debug!(?peer, "Dropping peer state"); - if let Some(events_sender) = &self.events_sender { - events_sender.send(PeerStateEvent::PeerDropped(peer.clone())); - } - } -} diff --git a/node/actors/sync_blocks/src/peers/tests/basics.rs b/node/actors/sync_blocks/src/peers/tests/basics.rs deleted file mode 100644 index 4767967e..00000000 --- a/node/actors/sync_blocks/src/peers/tests/basics.rs +++ /dev/null @@ -1,522 +0,0 @@ -//! Basic tests. - -use super::*; -use crate::{ - io, - tests::{make_response, sync_state}, -}; -use rand::seq::SliceRandom as _; - -#[derive(Debug)] -struct UpdatingPeerStateWithSingleBlock; - -#[async_trait] -impl Test for UpdatingPeerStateWithSingleBlock { - const BLOCK_COUNT: usize = 2; - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - .. - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.first())) - .unwrap(); - - // Check that the actor has sent a `get_block` request to the peer - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, setup.blocks[0].number()); - - // Emulate the peer sending a correct response. - response.send(make_response(setup.blocks.first())).unwrap(); - - let peer_event = events_receiver.recv(ctx).await?; - assert_matches!(peer_event, PeerStateEvent::GotBlock(n) if n == setup.blocks[0].number()); - - // Check that the block has been saved locally. 
- storage - .wait_until_persisted(ctx, setup.blocks[0].number()) - .await?; - Ok(()) - } -} - -#[tokio::test] -async fn updating_peer_state_with_single_block() { - test_peer_states(UpdatingPeerStateWithSingleBlock).await; -} - -#[derive(Debug)] -struct CancelingBlockRetrieval; - -#[async_trait] -impl Test for CancelingBlockRetrieval { - const BLOCK_COUNT: usize = 5; - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - storage, - mut message_receiver, - .. - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.first())) - .unwrap(); - - // Check that the actor has sent a `get_block` request to the peer - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { mut response, .. }) = - message_receiver.recv(ctx).await?; - - // Emulate receiving block using external means. - storage.queue_block(ctx, setup.blocks[0].clone()).await?; - - // Retrieval of the block must be canceled. - response.closed().await; - Ok(()) - } -} - -#[tokio::test] -async fn canceling_block_retrieval() { - test_peer_states(CancelingBlockRetrieval).await; -} - -#[derive(Debug)] -struct FilteringBlockRetrieval; - -#[async_trait] -impl Test for FilteringBlockRetrieval { - const BLOCK_COUNT: usize = 5; - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - storage, - mut message_receiver, - .. - } = handles; - - // Emulate receiving block using external means. - storage.queue_block(ctx, setup.blocks[0].clone()).await?; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.get(1))) - .unwrap(); - - // Check that the actor has sent `get_block` request to the peer, but only for block #2. 
- let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, number, .. - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, setup.blocks[1].number()); - assert!(message_receiver.try_recv().is_none()); - Ok(()) - } -} - -#[tokio::test] -async fn filtering_block_retrieval() { - test_peer_states(FilteringBlockRetrieval).await; -} - -#[derive(Debug)] -struct UpdatingPeerStateWithMultipleBlocks; - -impl UpdatingPeerStateWithMultipleBlocks { - const MAX_CONCURRENT_BLOCKS: usize = 3; -} - -#[async_trait] -impl Test for UpdatingPeerStateWithMultipleBlocks { - const BLOCK_COUNT: usize = 10; - - fn config(&self) -> Config { - let mut config = Config::new(); - config.max_concurrent_blocks_per_peer = Self::MAX_CONCURRENT_BLOCKS; - // ^ We want to test rate limiting for peers - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - config - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.last()).clone()) - .unwrap(); - - let mut requested_blocks = HashMap::new(); - for _ in setup.blocks.iter() { - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message_receiver.recv(ctx).await.unwrap(); - - tracing::trace!("Received request for block #{number}"); - assert_eq!(recipient, peer_key); - assert!( - requested_blocks.insert(number, response).is_none(), - "Block #{number} requested twice" - ); - - if requested_blocks.len() == Self::MAX_CONCURRENT_BLOCKS || rng.gen() { - // Answer a random request. 
- let number = *requested_blocks.keys().choose(rng).unwrap(); - let response = requested_blocks.remove(&number).unwrap(); - response.send(make_response(setup.block(number))).unwrap(); - - let peer_event = events_receiver.recv(ctx).await?; - assert_matches!(peer_event, PeerStateEvent::GotBlock(got) if got == number); - } - clock.advance(BLOCK_SLEEP_INTERVAL); - } - - // Answer all remaining requests. - for (number, response) in requested_blocks { - response.send(make_response(setup.block(number))).unwrap(); - let peer_event = events_receiver.recv(ctx).await?; - assert_matches!(peer_event, PeerStateEvent::GotBlock(got) if got == number); - } - - let expected_block_number = BlockNumber(Self::BLOCK_COUNT as u64 - 1); - storage - .wait_until_persisted(ctx, expected_block_number) - .await?; - Ok(()) - } -} - -#[tokio::test] -async fn updating_peer_state_with_multiple_blocks() { - test_peer_states(UpdatingPeerStateWithMultipleBlocks).await; -} - -#[derive(Debug)] -struct DisconnectingPeer; - -#[async_trait] -impl Test for DisconnectingPeer { - const BLOCK_COUNT: usize = 5; - - fn config(&self) -> Config { - let mut config = Config::new(); - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - config - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.first())) - .unwrap(); - - // Drop the response sender emulating peer disconnect. - let msg = message_receiver.recv(ctx).await?; - { - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - .. 
- }) = &msg; - assert_eq!(recipient, &peer_key); - assert_eq!(number, &setup.blocks[0].number()); - } - drop(msg); - - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::PeerDropped(key) if key == peer_key), - ) - .await - .context("wait for PeerDropped")?; - - // Check that no new requests are sent (there are no peers to send them to). - clock.advance(BLOCK_SLEEP_INTERVAL); - assert_matches!(message_receiver.try_recv(), None); - - // Re-connect the peer with an updated state. - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.get(1))) - .unwrap(); - // Ensure that blocks are re-requested. - clock.advance(BLOCK_SLEEP_INTERVAL); - - let mut responses = HashMap::with_capacity(2); - for _ in 0..2 { - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, peer_key); - assert!(responses.insert(number, response).is_none()); - } - - assert!(responses.contains_key(&setup.blocks[0].number())); - assert!(responses.contains_key(&setup.blocks[1].number())); - // Send one of the responses and drop the other request. - let response = responses.remove(&setup.blocks[1].number()).unwrap(); - response.send(make_response(setup.blocks.get(1))).unwrap(); - - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::GotBlock(n) if n==setup.blocks[1].number()), - ) - .await?; - drop(responses); - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::PeerDropped(key) if key == peer_key), - ) - .await?; - - // Check that no new requests are sent (there are no peers to send them to). - clock.advance(BLOCK_SLEEP_INTERVAL); - assert_matches!(message_receiver.try_recv(), None); - - // Re-connect the peer with the same state. 
- peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.get(1))) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, setup.blocks[0].number()); - response.send(make_response(setup.blocks.first())).unwrap(); - - let peer_event = events_receiver.recv(ctx).await?; - assert_matches!(peer_event, PeerStateEvent::GotBlock(n) if n==setup.blocks[0].number()); - - // Check that no new requests are sent (all blocks are downloaded). - clock.advance(BLOCK_SLEEP_INTERVAL); - assert_matches!(message_receiver.try_recv(), None); - - storage.wait_until_persisted(ctx, BlockNumber(1)).await?; - Ok(()) - } -} - -#[tokio::test] -async fn disconnecting_peer() { - test_peer_states(DisconnectingPeer).await; -} - -#[derive(Debug)] -struct DownloadingBlocksInGaps { - local_blocks: Vec, - increase_peer_block_number_during_test: bool, -} - -impl DownloadingBlocksInGaps { - fn new(local_blocks: &[usize]) -> Self { - Self { - local_blocks: local_blocks.to_vec(), - increase_peer_block_number_during_test: false, - } - } -} - -#[async_trait] -impl Test for DownloadingBlocksInGaps { - const BLOCK_COUNT: usize = 10; - - fn config(&self) -> Config { - let mut config = Config::new(); - config.max_concurrent_blocks = 1; - // ^ Forces the node to download blocks in a deterministic order - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - config - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - storage, - mut message_receiver, - .. 
- } = handles; - - scope::run!(ctx, |ctx, s| async { - for n in &self.local_blocks { - s.spawn(storage.queue_block(ctx, setup.blocks[*n].clone())); - } - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - let mut last_peer_block = if self.increase_peer_block_number_during_test { - setup.blocks.choose(rng) - } else { - setup.blocks.last() - }; - peer_states - .update(&peer_key, sync_state(&setup, last_peer_block)) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - // Check that all missing blocks are requested. - for n in 0..setup.blocks.len() { - if self.local_blocks.contains(&n) { - continue; - } - let n = setup.blocks[n].number(); - if n > last_peer_block.unwrap().number() { - last_peer_block = setup.blocks.iter().filter(|b| b.number() >= n).choose(rng); - peer_states - .update(&peer_key, sync_state(&setup, last_peer_block)) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - } - - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message_receiver.recv(ctx).await?; - - assert_eq!(recipient, peer_key); - assert!(number <= last_peer_block.unwrap().number()); - response.send(make_response(setup.block(number))).unwrap(); - storage.wait_until_persisted(ctx, number).await?; - clock.advance(BLOCK_SLEEP_INTERVAL); - } - Ok(()) - }) - .await?; - Ok(()) - } -} - -const LOCAL_BLOCK_NUMBERS: [&[usize]; 3] = [&[1, 9], &[3, 5, 6, 8], &[4]]; - -#[test_casing(6, Product((LOCAL_BLOCK_NUMBERS, [false, true])))] -#[tokio::test] -async fn downloading_blocks_in_gaps( - local_blocks: &[usize], - increase_peer_block_number_during_test: bool, -) { - let mut test = DownloadingBlocksInGaps::new(local_blocks); - test.increase_peer_block_number_during_test = increase_peer_block_number_during_test; - test_peer_states(test).await; -} - -#[derive(Debug)] -struct LimitingGetBlockConcurrency; - -#[async_trait] -impl Test for LimitingGetBlockConcurrency { - const BLOCK_COUNT: usize = 5; - - fn config(&self) -> Config 
{ - let mut config = Config::new(); - config.max_concurrent_blocks = 3; - config - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - storage, - mut message_receiver, - .. - } = handles; - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, setup.blocks.last())) - .unwrap(); - - // The actor should request 3 new blocks it's now aware of from the only peer it's currently - // aware of. Note that blocks may be queried in any order. - let mut message_responses = HashMap::new(); - for _ in 0..3 { - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, peer_key); - assert!(message_responses.insert(number, response).is_none()); - } - assert_matches!(message_receiver.try_recv(), None); - assert_eq!( - message_responses.keys().copied().collect::>(), - setup.blocks[0..3].iter().map(|b| b.number()).collect(), - ); - tracing::info!("blocks requested"); - - // Send a correct response. - let response = message_responses.remove(&setup.blocks[0].number()).unwrap(); - response.send(make_response(setup.blocks.first())).unwrap(); - storage - .wait_until_persisted(ctx, setup.blocks[0].number()) - .await?; - - // The actor should now request another block. - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, number, .. 
- }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, peer_key); - assert_eq!(number, setup.blocks[3].number()); - - Ok(()) - } -} - -#[tokio::test] -async fn limiting_get_block_concurrency() { - test_peer_states(LimitingGetBlockConcurrency).await; -} diff --git a/node/actors/sync_blocks/src/peers/tests/fakes.rs b/node/actors/sync_blocks/src/peers/tests/fakes.rs deleted file mode 100644 index 95c6ddd8..00000000 --- a/node/actors/sync_blocks/src/peers/tests/fakes.rs +++ /dev/null @@ -1,141 +0,0 @@ -//! Tests focused on handling peers providing fake information to the node. - -use super::*; -use crate::tests::sync_state; -use zksync_consensus_roles::{validator, validator::testonly::Setup}; -use zksync_consensus_storage::testonly::new_store; - -#[tokio::test] -async fn processing_invalid_sync_states() { - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let mut setup = Setup::new(rng, 4); - setup.push_blocks(rng, 3); - let (storage, _runner) = new_store(ctx, &setup.genesis).await; - - let (message_sender, _) = channel::unbounded(); - let peer_states = PeerStates::new(Config::new(), storage, message_sender); - let peer = &rng.gen::().public(); - - let mut invalid_block = setup.blocks[1].clone(); - invalid_block.justification.message.proposal.number = rng.gen(); - let invalid_sync_state = sync_state(&setup, Some(&invalid_block)); - assert!(peer_states.update(peer, invalid_sync_state).is_err()); - - let mut other_network = Setup::new(rng, 4); - other_network.push_blocks(rng, 2); - let invalid_sync_state = sync_state(&other_network, other_network.blocks.get(1)); - assert!(peer_states.update(peer, invalid_sync_state).is_err()); -} - -#[derive(Debug)] -struct PeerWithFakeSyncState; - -#[async_trait] -impl Test for PeerWithFakeSyncState { - const BLOCK_COUNT: usize = 10; - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - mut events_receiver, - .. 
- } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - let mut invalid_block = setup.blocks[1].clone(); - invalid_block.justification.message.proposal.number = rng.gen(); - let fake_sync_state = sync_state(&setup, Some(&invalid_block)); - assert!(peer_states.update(&peer_key, fake_sync_state).is_err()); - - clock.advance(BLOCK_SLEEP_INTERVAL); - assert_matches!(events_receiver.try_recv(), None); - Ok(()) - } -} - -#[tokio::test] -async fn receiving_fake_sync_state_from_peer() { - test_peer_states(PeerWithFakeSyncState).await; -} - -#[derive(Debug)] -struct PeerWithFakeBlock; - -#[async_trait] -impl Test for PeerWithFakeBlock { - const BLOCK_COUNT: usize = 10; - - fn config(&self) -> Config { - let mut cfg = Config::new(); - cfg.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - cfg - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - } = handles; - - let rng = &mut ctx.rng(); - - for fake_block in [ - // other block than requested - setup.blocks[1].clone(), - // block with wrong validator set - { - let mut s = Setup::new(rng, 4); - s.push_blocks(rng, 1); - s.blocks[0].clone() - }, - // block with mismatching payload, - { - let mut block = setup.blocks[0].clone(); - block.payload = validator::Payload(b"invalid".to_vec()); - block - }, - ] { - let key = rng.gen::().public(); - peer_states - .update(&key, sync_state(&setup, setup.blocks.first())) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, key); - assert_eq!(number, setup.blocks[0].number()); - response.send(Ok(fake_block)).unwrap(); - - wait_for_event(ctx, &mut events_receiver, |ev| { - matches!(ev, - PeerStateEvent::RpcFailed { - block_number, - peer_key, - } 
if peer_key == key && block_number == number - ) - }) - .await?; - } - - // The invalid block must not be saved. - assert!(storage.block(ctx, BlockNumber(1)).await?.is_none()); - Ok(()) - } -} - -#[tokio::test] -async fn receiving_fake_block_from_peer() { - test_peer_states(PeerWithFakeBlock).await; -} diff --git a/node/actors/sync_blocks/src/peers/tests/mod.rs b/node/actors/sync_blocks/src/peers/tests/mod.rs deleted file mode 100644 index 01120c5b..00000000 --- a/node/actors/sync_blocks/src/peers/tests/mod.rs +++ /dev/null @@ -1,157 +0,0 @@ -use super::*; -use assert_matches::assert_matches; -use async_trait::async_trait; -use rand::{seq::IteratorRandom, Rng}; -use std::{collections::HashSet, fmt}; -use test_casing::{test_casing, Product}; -use tracing::instrument; -use zksync_concurrency::{ - testonly::{abort_on_panic, set_timeout}, - time, -}; -use zksync_consensus_roles::validator; -use zksync_consensus_storage::testonly::new_store; - -mod basics; -mod fakes; -mod multiple_peers; - -const TEST_TIMEOUT: time::Duration = time::Duration::seconds(5); -const BLOCK_SLEEP_INTERVAL: time::Duration = time::Duration::milliseconds(5); - -async fn wait_for_event( - ctx: &ctx::Ctx, - events: &mut channel::UnboundedReceiver, - pred: impl Fn(PeerStateEvent) -> bool, -) -> ctx::OrCanceled<()> { - while !pred(events.recv(ctx).await?) 
{} - Ok(()) -} - -#[derive(Debug)] -struct TestHandles { - clock: ctx::ManualClock, - setup: validator::testonly::Setup, - peer_states: Arc, - storage: Arc, - message_receiver: channel::UnboundedReceiver, - events_receiver: channel::UnboundedReceiver, -} - -#[async_trait] -trait Test: fmt::Debug + Send + Sync { - const BLOCK_COUNT: usize; - // TODO: move this to genesis - const GENESIS_BLOCK_NUMBER: usize = 0; - - fn config(&self) -> Config { - Config::new() - } - - async fn initialize_storage( - &self, - _ctx: &ctx::Ctx, - _storage: &BlockStore, - _setup: &validator::testonly::Setup, - ) { - // Does nothing by default - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()>; -} - -#[instrument(level = "trace")] -async fn test_peer_states(test: T) { - abort_on_panic(); - let _guard = set_timeout(TEST_TIMEOUT); - - let clock = ctx::ManualClock::new(); - let ctx = &ctx::test_root(&clock); - let rng = &mut ctx.rng(); - let mut setup = validator::testonly::Setup::new(rng, 4); - setup.push_blocks(rng, T::BLOCK_COUNT); - let (store, store_run) = new_store(ctx, &setup.genesis).await; - test.initialize_storage(ctx, store.as_ref(), &setup).await; - - let (message_sender, message_receiver) = channel::unbounded(); - let (events_sender, events_receiver) = channel::unbounded(); - let mut peer_states = PeerStates::new(test.config(), store.clone(), message_sender); - peer_states.events_sender = Some(events_sender); - let peer_states = Arc::new(peer_states); - let test_handles = TestHandles { - clock, - setup, - peer_states: peer_states.clone(), - storage: store.clone(), - message_receiver, - events_receiver, - }; - - scope::run!(ctx, |ctx, s| async { - s.spawn_bg(store_run.run(ctx)); - s.spawn_bg(async { - peer_states.run_block_fetcher(ctx).await.ok(); - Ok(()) - }); - test.test(ctx, test_handles).await - }) - .await - .unwrap(); -} - -#[tokio::test] -async fn test_try_acquire_peer_permit() { - let clock = ctx::ManualClock::new(); - let ctx = 
&ctx::test_root(&clock); - let rng = &mut ctx.rng(); - let mut setup = validator::testonly::Setup::new(rng, 1); - setup.push_blocks(rng, 10); - scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.genesis).await; - s.spawn_bg(runner.run(ctx)); - let (send, _recv) = ctx::channel::unbounded(); - let peer_states = PeerStates::new(Config::default(), store, send); - - let peer: node::PublicKey = rng.gen(); - let b = &setup.blocks; - for s in [ - // Empty entry. - BlockStoreState { - first: b[0].number(), - last: None, - }, - // Entry with some blocks. - BlockStoreState { - first: b[0].number(), - last: Some(b[3].justification.clone()), - }, - // Entry with changed first. - BlockStoreState { - first: b[1].number(), - last: Some(b[3].justification.clone()), - }, - // Empty entry again. - BlockStoreState { - first: b[1].number(), - last: None, - }, - ] { - peer_states.update(&peer, s.clone()).unwrap(); - for block in b { - let got = peer_states.select_peer(block.number()); - if s.first <= block.number() - && s.last - .as_ref() - .map_or(false, |last| block.number() <= last.header().number) - { - assert_eq!(Some(peer.clone()), got); - } else { - assert_eq!(None, got); - } - } - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs b/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs deleted file mode 100644 index 4281c69c..00000000 --- a/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs +++ /dev/null @@ -1,345 +0,0 @@ -use super::*; -use crate::tests::{make_response, sync_state}; - -#[derive(Debug)] -struct RequestingBlocksFromTwoPeers; - -#[async_trait] -impl Test for RequestingBlocksFromTwoPeers { - const BLOCK_COUNT: usize = 5; - - fn config(&self) -> Config { - let mut config = Config::new(); - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - config.max_concurrent_blocks = 5; - config.max_concurrent_blocks_per_peer = 1; - config - } - - async fn test(self, 
ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - } = handles; - - let rng = &mut ctx.rng(); - let first_peer = rng.gen::().public(); - peer_states - .update(&first_peer, sync_state(&setup, setup.blocks.get(1))) - .unwrap(); - - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number: first_peer_block_number, - response: first_peer_response, - }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, first_peer); - assert!(setup.blocks[0..=1] - .iter() - .any(|b| b.number() == first_peer_block_number)); - tracing::info!(%first_peer_block_number, "received request"); - - let second_peer = rng.gen::().public(); - peer_states - .update(&second_peer, sync_state(&setup, setup.blocks.get(3))) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number: second_peer_block_number, - response: second_peer_response, - }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, second_peer); - assert!(setup.blocks[0..=1] - .iter() - .any(|b| b.number() == second_peer_block_number)); - tracing::info!(%second_peer_block_number, "received request"); - - first_peer_response - .send(make_response(setup.block(first_peer_block_number))) - .unwrap(); - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::GotBlock(num) if num == first_peer_block_number), - ) - .await - .unwrap(); - // The node shouldn't send more requests to the first peer since it would be beyond - // its known latest block number (2). - clock.advance(BLOCK_SLEEP_INTERVAL); - assert_matches!(message_receiver.try_recv(), None); - - peer_states - .update(&first_peer, sync_state(&setup, setup.blocks.get(3))) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - // Now the actor can get block #3 from the peer. 
- - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number: first_peer_block_number, - response: first_peer_response, - }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, first_peer); - assert!(setup.blocks[2..=3] - .iter() - .any(|b| b.number() == first_peer_block_number)); - tracing::info!(%first_peer_block_number, "received request"); - - first_peer_response - .send(make_response(setup.block(first_peer_block_number))) - .unwrap(); - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::GotBlock(num) if num == first_peer_block_number), - ) - .await - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number: first_peer_block_number, - response: first_peer_response, - }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, first_peer); - assert!(setup.blocks[2..=3] - .iter() - .any(|b| b.number() == first_peer_block_number)); - tracing::info!(%first_peer_block_number, "received request"); - - second_peer_response - .send(make_response(setup.block(second_peer_block_number))) - .unwrap(); - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::GotBlock(num) if num == second_peer_block_number), - ) - .await - .unwrap(); - first_peer_response - .send(make_response(setup.block(first_peer_block_number))) - .unwrap(); - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev, PeerStateEvent::GotBlock(num) if num == first_peer_block_number), - ) - .await - .unwrap(); - // No more blocks should be requested from peers. 
- clock.advance(BLOCK_SLEEP_INTERVAL); - assert_matches!(message_receiver.try_recv(), None); - - storage - .wait_until_persisted(ctx, setup.blocks[3].number()) - .await?; - Ok(()) - } -} - -/*#[tokio::test] -async fn requesting_blocks_from_two_peers() { - test_peer_states(RequestingBlocksFromTwoPeers).await; -}*/ - -#[derive(Debug, Clone, Copy)] -struct PeerBehavior { - /// The peer will go offline after this block. - last_block: usize, - /// The peer will stop responding after this block, but will still announce `SyncState` updates. - /// Logically, should be `<= last_block`. - last_block_to_return: usize, -} - -impl Default for PeerBehavior { - fn default() -> Self { - Self { - last_block: usize::MAX, - last_block_to_return: usize::MAX, - } - } -} - -#[derive(Debug, Clone)] -struct RequestingBlocksFromMultiplePeers { - peer_behavior: Vec, - max_concurrent_blocks_per_peer: usize, - respond_probability: f64, -} - -impl RequestingBlocksFromMultiplePeers { - fn new(peer_count: usize, max_concurrent_blocks_per_peer: usize) -> Self { - Self { - peer_behavior: vec![PeerBehavior::default(); peer_count], - max_concurrent_blocks_per_peer, - respond_probability: 0.0, - } - } - - fn create_peers(&self, rng: &mut impl Rng) -> HashMap { - let last_block_number = Self::BLOCK_COUNT - 1; - let peers = self.peer_behavior.iter().copied().map(|behavior| { - let behavior = PeerBehavior { - last_block: behavior.last_block.min(last_block_number), - last_block_to_return: behavior.last_block_to_return.min(last_block_number), - }; - let peer_key = rng.gen::().public(); - (peer_key, behavior) - }); - peers.collect() - } -} - -#[async_trait] -impl Test for RequestingBlocksFromMultiplePeers { - const BLOCK_COUNT: usize = 20; - - fn config(&self) -> Config { - let mut config = Config::new(); - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - config.max_concurrent_blocks_per_peer = self.max_concurrent_blocks_per_peer; - config - } - - async fn test(self, ctx: &ctx::Ctx, handles: 
TestHandles) -> anyhow::Result<()> { - let TestHandles { - clock, - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - } = handles; - - let rng = &mut ctx.rng(); - let peers = &self.create_peers(rng); - - scope::run!(ctx, |ctx, s| async { - // Announce peer states. - for (peer_key, peer) in peers { - peer_states.update(peer_key, sync_state(&setup, setup.blocks.get(peer.last_block))).unwrap(); - } - - s.spawn_bg(async { - let mut responses_by_peer: HashMap<_, Vec<_>> = HashMap::new(); - let mut requested_blocks = HashSet::new(); - while requested_blocks.len() < Self::BLOCK_COUNT { - let Ok(message) = message_receiver.recv(ctx).await else { - return Ok(()); // Test is finished - }; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - - tracing::trace!("Block #{number} requested from {recipient:?}"); - assert!(number <= setup.blocks[peers[&recipient].last_block].number()); - - if setup.blocks[peers[&recipient].last_block_to_return].number() < number { - tracing::trace!("Dropping request for block #{number} to {recipient:?}"); - continue; - } - - assert!( - requested_blocks.insert(number), - "Block #{number} requested twice from a responsive peer" - ); - let peer_responses = responses_by_peer.entry(recipient).or_default(); - peer_responses.push((number, response)); - assert!(peer_responses.len() <= self.max_concurrent_blocks_per_peer); - if peer_responses.len() == self.max_concurrent_blocks_per_peer { - // Peer is at capacity, respond to a random request in order to progress - let idx = rng.gen_range(0..peer_responses.len()); - let (number, response) = peer_responses.remove(idx); - response.send(make_response(setup.block(number))).unwrap(); - } - - // Respond to some other random requests. - for peer_responses in responses_by_peer.values_mut() { - // Indexes are reversed in order to not be affected by removals. 
- for idx in (0..peer_responses.len()).rev() { - if !rng.gen_bool(self.respond_probability) { - continue; - } - let (number, response) = peer_responses.remove(idx); - response.send(make_response(setup.block(number))).unwrap(); - } - } - } - - // Answer to all remaining responses - for (number, response) in responses_by_peer.into_values().flatten() { - response.send(make_response(setup.block(number))).unwrap(); - } - Ok(()) - }); - - // We advance the clock when a node receives a new block or updates a peer state, - // since in both cases some new blocks may become available for download. - let mut block_numbers = HashSet::with_capacity(Self::BLOCK_COUNT); - while block_numbers.len() < Self::BLOCK_COUNT { - let peer_event = events_receiver.recv(ctx).await?; - match peer_event { - PeerStateEvent::GotBlock(number) => { - assert!( - block_numbers.insert(number), - "Block #{number} received twice" - ); - clock.advance(BLOCK_SLEEP_INTERVAL); - } - PeerStateEvent::RpcFailed{..} | PeerStateEvent::PeerDropped(_) => { /* Do nothing */ } - } - } - - storage.wait_until_persisted(ctx,setup.blocks.last().unwrap().header().number).await?; - Ok(()) - }) - .await - } -} - -const RESPOND_PROBABILITIES: [f64; 5] = [0.0, 0.1, 0.2, 0.5, 0.9]; - -#[test_casing(15, Product(([1, 2, 3], RESPOND_PROBABILITIES)))] -#[tokio::test] -async fn requesting_blocks(max_concurrent_blocks_per_peer: usize, respond_probability: f64) { - let mut test = RequestingBlocksFromMultiplePeers::new(3, max_concurrent_blocks_per_peer); - test.respond_probability = respond_probability; - test_peer_states(test.clone()).await; -} - -#[test_casing(15, Product(([1, 2, 3], RESPOND_PROBABILITIES)))] -#[tokio::test] -async fn requesting_blocks_with_failures( - max_concurrent_blocks_per_peer: usize, - respond_probability: f64, -) { - let mut test = RequestingBlocksFromMultiplePeers::new(3, max_concurrent_blocks_per_peer); - test.respond_probability = respond_probability; - test.peer_behavior[0].last_block = 5; - 
test.peer_behavior[1].last_block = 15; - test_peer_states(test).await; -} - -#[test_casing(15, Product(([1, 2, 3], RESPOND_PROBABILITIES)))] -#[tokio::test] -async fn requesting_blocks_with_unreliable_peers( - max_concurrent_blocks_per_peer: usize, - respond_probability: f64, -) { - let mut test = RequestingBlocksFromMultiplePeers::new(3, max_concurrent_blocks_per_peer); - test.respond_probability = respond_probability; - test.peer_behavior[0].last_block_to_return = 5; - test.peer_behavior[1].last_block_to_return = 15; - test_peer_states(test).await; -} diff --git a/node/actors/sync_blocks/src/tests/end_to_end.rs b/node/actors/sync_blocks/src/tests/end_to_end.rs deleted file mode 100644 index 0a0f1849..00000000 --- a/node/actors/sync_blocks/src/tests/end_to_end.rs +++ /dev/null @@ -1,393 +0,0 @@ -//! End-to-end tests that launch a network of nodes and the `SyncBlocks` actor for each node. -use super::*; -use anyhow::Context as _; -use async_trait::async_trait; -use rand::seq::SliceRandom; -use std::fmt; -use test_casing::test_casing; -use tracing::{instrument, Instrument}; -use zksync_concurrency::{ - ctx, - ctx::channel, - scope, - testonly::{abort_on_panic, set_timeout}, -}; -use zksync_consensus_network as network; -use zksync_consensus_storage::testonly::new_store_with_first; - -type NetworkDispatcherPipe = - pipe::DispatcherPipe; - -#[derive(Debug)] -struct Node { - store: Arc, - start: channel::Sender<()>, - terminate: channel::Sender<()>, -} - -impl Node { - async fn new(ctx: &ctx::Ctx, network: network::Config, setup: &Setup) -> (Self, NodeRunner) { - Self::new_with_first(ctx, network, setup, setup.genesis.fork.first_block).await - } - - async fn new_with_first( - ctx: &ctx::Ctx, - network: network::Config, - setup: &Setup, - first: validator::BlockNumber, - ) -> (Self, NodeRunner) { - let (store, store_runner) = new_store_with_first(ctx, &setup.genesis, first).await; - let (start_send, start_recv) = channel::bounded(1); - let (terminate_send, 
terminate_recv) = channel::bounded(1); - - let runner = NodeRunner { - network, - store: store.clone(), - store_runner, - start: start_recv, - terminate: terminate_recv, - }; - let this = Self { - store, - start: start_send, - terminate: terminate_send, - }; - (this, runner) - } - - fn start(&self) { - let _ = self.start.try_send(()); - } - - async fn terminate(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled<()> { - let _ = self.terminate.try_send(()); - self.terminate.closed(ctx).await - } -} - -#[must_use] -struct NodeRunner { - network: network::Config, - store: Arc, - store_runner: BlockStoreRunner, - start: channel::Receiver<()>, - terminate: channel::Receiver<()>, -} - -impl NodeRunner { - async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { - tracing::info!("NodeRunner::run()"); - let key = self.network.gossip.key.public(); - let (sync_blocks_actor_pipe, sync_blocks_dispatcher_pipe) = pipe::new(); - let (mut network, network_runner) = - network::testonly::Instance::new(self.network.clone(), self.store.clone()); - let sync_blocks_config = Config::new(); - let res = scope::run!(ctx, |ctx, s| async { - s.spawn_bg(self.store_runner.run(ctx)); - s.spawn_bg(network_runner.run(ctx)); - network.wait_for_gossip_connections().await; - tracing::info!("Node connected to peers"); - - self.start.recv(ctx).await?; - tracing::info!("switch_on"); - s.spawn_bg( - async { - Self::run_executor(ctx, sync_blocks_dispatcher_pipe, network.pipe()) - .await - .with_context(|| format!("executor for {key:?}")) - } - .instrument(tracing::info_span!("mock_executor", ?key)), - ); - s.spawn_bg(sync_blocks_config.run(ctx, sync_blocks_actor_pipe, self.store.clone())); - tracing::info!("Node is fully started"); - - let _ = self.terminate.recv(ctx).await; - tracing::info!("stopping"); - Ok(()) - }) - .await; - drop(self.terminate); - tracing::info!("node stopped"); - res - } - - async fn run_executor( - ctx: &ctx::Ctx, - mut sync_blocks_dispatcher_pipe: pipe::DispatcherPipe, - 
network_dispatcher_pipe: &mut NetworkDispatcherPipe, - ) -> anyhow::Result<()> { - scope::run!(ctx, |ctx, s| async { - s.spawn(async { - while let Ok(message) = network_dispatcher_pipe.recv.recv(ctx).await { - tracing::trace!(?message, "Received network message"); - match message { - network::io::OutputMessage::SyncBlocks(req) => { - sync_blocks_dispatcher_pipe.send.send(req.into()); - } - _ => unreachable!("consensus messages should not be produced"), - } - } - Ok(()) - }); - - while let Ok(message) = sync_blocks_dispatcher_pipe.recv.recv(ctx).await { - let OutputMessage::Network(message) = message; - tracing::trace!(?message, "Received sync blocks message"); - network_dispatcher_pipe.send.send(message.into()); - } - Ok(()) - }) - .await - } -} - -#[async_trait] -trait GossipNetworkTest: fmt::Debug + Send { - /// Returns the number of nodes in the gossip network and number of peers for each node. - fn network_params(&self) -> (usize, usize); - async fn test(self, ctx: &ctx::Ctx, setup: &Setup, network: Vec) -> anyhow::Result<()>; -} - -#[instrument(level = "trace")] -async fn test_sync_blocks(test: T) { - abort_on_panic(); - let _guard = set_timeout(TEST_TIMEOUT); - let ctx = &ctx::test_root(&ctx::AffineClock::new(25.)); - let rng = &mut ctx.rng(); - let (node_count, gossip_peers) = test.network_params(); - - let mut setup = validator::testonly::Setup::new(rng, node_count); - setup.push_blocks(rng, 10); - scope::run!(ctx, |ctx, s| async { - let mut nodes = vec![]; - for (i, net) in network::testonly::new_configs(rng, &setup, gossip_peers) - .into_iter() - .enumerate() - { - let (node, runner) = Node::new(ctx, net, &setup).await; - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); - nodes.push(node); - } - test.test(ctx, &setup, nodes).await - }) - .await - .unwrap(); -} - -#[derive(Debug)] -struct BasicSynchronization { - node_count: usize, - gossip_peers: usize, -} - -#[async_trait] -impl GossipNetworkTest for BasicSynchronization { - fn 
network_params(&self) -> (usize, usize) { - (self.node_count, self.gossip_peers) - } - - async fn test(self, ctx: &ctx::Ctx, setup: &Setup, nodes: Vec) -> anyhow::Result<()> { - let rng = &mut ctx.rng(); - - tracing::info!("Check initial node states"); - for node in &nodes { - node.start(); - let state = node.store.queued(); - assert_eq!(state.first, setup.genesis.fork.first_block); - assert_eq!(state.last, None); - } - - for block in &setup.blocks[0..5] { - let node = nodes.choose(rng).unwrap(); - node.store.queue_block(ctx, block.clone()).await.unwrap(); - - tracing::info!("Wait until all nodes get block #{}", block.number()); - for node in &nodes { - node.store.wait_until_persisted(ctx, block.number()).await?; - } - } - - let node = nodes.choose(rng).unwrap(); - scope::run!(ctx, |ctx, s| async { - // Add a batch of blocks. - for block in setup.blocks[5..].iter().rev() { - s.spawn_bg(node.store.queue_block(ctx, block.clone())); - } - - // Wait until nodes get all new blocks. - let last = setup.blocks.last().unwrap().number(); - for node in &nodes { - node.store.wait_until_persisted(ctx, last).await?; - } - Ok(()) - }) - .await?; - Ok(()) - } -} - -#[test_casing(5, [2, 3, 5, 7, 10])] -#[tokio::test(flavor = "multi_thread")] -async fn basic_synchronization_with_single_peer(node_count: usize) { - test_sync_blocks(BasicSynchronization { - node_count, - gossip_peers: 1, - }) - .await; -} - -#[test_casing(5, [(3, 2), (5, 2), (5, 3), (7, 2), (7, 3)])] -#[tokio::test(flavor = "multi_thread")] -async fn basic_synchronization_with_multiple_peers(node_count: usize, gossip_peers: usize) { - test_sync_blocks(BasicSynchronization { - node_count, - gossip_peers, - }) - .await; -} - -#[derive(Debug)] -struct SwitchingOffNodes { - node_count: usize, -} - -#[async_trait] -impl GossipNetworkTest for SwitchingOffNodes { - fn network_params(&self) -> (usize, usize) { - // Ensure that each node is connected to all others via an inbound or outbound channel - (self.node_count, 
self.node_count / 2) - } - - async fn test(self, ctx: &ctx::Ctx, setup: &Setup, mut nodes: Vec) -> anyhow::Result<()> { - let rng = &mut ctx.rng(); - nodes.shuffle(rng); - - for node in &nodes { - node.start(); - } - - for i in 0..nodes.len() { - tracing::info!("{} nodes left", nodes.len() - i); - let block = &setup.blocks[i]; - nodes[i..] - .choose(rng) - .unwrap() - .store - .queue_block(ctx, block.clone()) - .await - .unwrap(); - tracing::info!("block {} inserted", block.number()); - - // Wait until all remaining nodes get the new block. - for node in &nodes[i..] { - node.store.wait_until_persisted(ctx, block.number()).await?; - } - tracing::info!("All nodes received block #{}", block.number()); - - // Terminate a random node. - // We start switching off only after the first round, to make sure all nodes are fully - // started. - nodes[i].terminate(ctx).await.unwrap(); - } - tracing::info!("test finished, terminating"); - Ok(()) - } -} - -#[test_casing(5, 3..=7)] -#[tokio::test(flavor = "multi_thread")] -async fn switching_off_nodes(node_count: usize) { - test_sync_blocks(SwitchingOffNodes { node_count }).await; -} - -#[derive(Debug)] -struct SwitchingOnNodes { - node_count: usize, -} - -#[async_trait] -impl GossipNetworkTest for SwitchingOnNodes { - fn network_params(&self) -> (usize, usize) { - (self.node_count, self.node_count / 2) - } - - async fn test(self, ctx: &ctx::Ctx, setup: &Setup, mut nodes: Vec) -> anyhow::Result<()> { - let rng = &mut ctx.rng(); - nodes.shuffle(rng); - for i in 0..nodes.len() { - nodes[i].start(); // Switch on a node. - let block = &setup.blocks[i]; - nodes[0..i + 1] - .choose(rng) - .unwrap() - .store - .queue_block(ctx, block.clone()) - .await - .unwrap(); - - // Wait until all switched on nodes get the new block. 
- for node in &nodes[0..i + 1] { - node.store.wait_until_persisted(ctx, block.number()).await?; - } - tracing::trace!("All nodes received block #{}", block.number()); - } - Ok(()) - } -} - -#[test_casing(5, 3..=7)] -#[tokio::test(flavor = "multi_thread")] -async fn switching_on_nodes(node_count: usize) { - test_sync_blocks(SwitchingOnNodes { node_count }).await; -} - -/// Test checking that nodes with different first block can synchronize. -#[tokio::test(flavor = "multi_thread")] -async fn test_different_first_block() { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(25.)); - let rng = &mut ctx.rng(); - - let mut setup = validator::testonly::Setup::new(rng, 2); - let n = 4; - setup.push_blocks(rng, 10); - scope::run!(ctx, |ctx, s| async { - let mut nodes = vec![]; - // Spawn `n` nodes, all connected to each other. - for (i, net) in network::testonly::new_configs(rng, &setup, n) - .into_iter() - .enumerate() - { - // Choose the first block for the node at random. - let first = setup.blocks.choose(rng).unwrap().number(); - let (node, runner) = Node::new_with_first(ctx, net, &setup, first).await; - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); - node.start(); - nodes.push(node); - } - // Randomize the order of nodes. - nodes.shuffle(rng); - - for block in &setup.blocks { - // Find nodes interested in the next block. - let interested_nodes: Vec<_> = nodes - .iter() - .filter(|n| n.store.queued().first <= block.number()) - .collect(); - // Store this block to one of them. - if let Some(node) = interested_nodes.choose(rng) { - node.store.queue_block(ctx, block.clone()).await.unwrap(); - } - // Wait until all remaining nodes get the new block. 
- for node in interested_nodes { - node.store - .wait_until_persisted(ctx, block.number()) - .await - .unwrap(); - } - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/node/actors/sync_blocks/src/tests/mod.rs b/node/actors/sync_blocks/src/tests/mod.rs deleted file mode 100644 index c7273669..00000000 --- a/node/actors/sync_blocks/src/tests/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! Tests for the block syncing actor. -use super::*; -use zksync_concurrency::time; -use zksync_consensus_network::io::GetBlockError; -use zksync_consensus_roles::validator::{self, testonly::Setup}; -use zksync_consensus_storage::{BlockStore, BlockStoreRunner, BlockStoreState}; -use zksync_consensus_utils::pipe; - -mod end_to_end; - -const TEST_TIMEOUT: time::Duration = time::Duration::seconds(20); - -pub(crate) fn sync_state(setup: &Setup, last: Option<&validator::FinalBlock>) -> BlockStoreState { - BlockStoreState { - first: setup.genesis.fork.first_block, - last: last.map(|b| b.justification.clone()), - } -} - -pub(crate) fn make_response( - block: Option<&validator::FinalBlock>, -) -> Result { - block.cloned().ok_or(GetBlockError::NotAvailable) -} diff --git a/node/deny.toml b/node/deny.toml index 9e0cf1c3..78cba495 100644 --- a/node/deny.toml +++ b/node/deny.toml @@ -59,7 +59,6 @@ multiple-versions = "deny" skip = [ # Old versions required by tempfile and prost-build. { name = "bitflags", version = "1.3.2" }, - { name = "heck", version = "0.4.1" }, # Old version required by tracing-subscriber. { name = "regex-automata", version = "0.1.10" }, @@ -75,7 +74,10 @@ skip = [ { name = "digest", version = "0.10.7" }, # Old versions required by kube. - { name = "strsim", version = "0.10.0" } + { name = "strsim", version = "0.10.0" }, + + # Old versions required by k8s-openapi. 
+ { name = "base64", version = "0.21.7" } ] [sources] diff --git a/node/libs/concurrency/src/ctx/mod.rs b/node/libs/concurrency/src/ctx/mod.rs index f86275df..6af9af06 100644 --- a/node/libs/concurrency/src/ctx/mod.rs +++ b/node/libs/concurrency/src/ctx/mod.rs @@ -11,16 +11,6 @@ //! Instead of "awaiting for new data on the channel", you "await for new data on the channel OR //! for context to get canceled". This way you can implement graceful shutdown //! in a very uniform way. -//! -//! Contrary to the golang implementation, we pass the context implicitly -//! in the thread-local memory. Implicit passing may look like magic, however -//! * it is built on top of `tokio::Runtime` which is also passed implicitly, -//! so the concept should be familiar for the tokio users. -//! * it prevents misuse of context, as what we actually try to model here -//! is a reader monad, which in essence is equivalent to implicit argument passing -//! (https://hackage.haskell.org/package/mtl-2.3.1/docs/Control-Monad-Reader.html) -//! * it presumably makes it easier to onboard new users, without having to add an explicit -//! context argument to all functions in their codebase. use crate::{signal, time}; use std::{fmt, future::Future, pin::Pin, sync::Arc, task}; @@ -149,9 +139,9 @@ impl Ctx { _parent: Some(self.0.clone()), })); // Spawn a task propagating task cancelation. - // This task takes references to only to the `canceled` signals - // of parent and child to avoid a reference loop (rather than - // the whole context object) to avoid a memory leak: + // This task takes references only to the `canceled` signals + // of parent and child (rather than the whole context object) + // to avoid a reference loop and therefore a memory leak: // context is automatically canceled when dropped, which // guarantees that this task eventually completes.
tokio::spawn(async move { @@ -173,13 +163,6 @@ impl Ctx { CtxAware(self.0.canceled.cancel_safe_recv()) } - /// Awaits until the local context gets canceled. Unlike [`Self::canceled()`], the returned - /// future has a static lifetime. - pub fn canceled_owned(&self) -> impl Future { - let canceled = self.0.canceled.clone(); - async move { canceled.cancel_safe_recv().await } - } - /// Checks if this context is still active (i.e., not canceled). pub fn is_active(&self) -> bool { !self.0.canceled.try_recv() diff --git a/node/libs/concurrency/src/sync/mod.rs b/node/libs/concurrency/src/sync/mod.rs index 7b5587a6..43cd5c0b 100644 --- a/node/libs/concurrency/src/sync/mod.rs +++ b/node/libs/concurrency/src/sync/mod.rs @@ -129,6 +129,8 @@ pub async fn wait_for<'a, T>( recv: &'a mut watch::Receiver, pred: impl Fn(&T) -> bool, ) -> ctx::OrCanceled> { + // TODO(gprusak): wait_for is not documented to be cancel-safe. + // We should use changed() instead. if let Ok(res) = ctx.wait(recv.wait_for(pred)).await? { return Ok(res); } diff --git a/node/libs/concurrency/src/time.rs b/node/libs/concurrency/src/time.rs index 718317c6..4e5581c0 100644 --- a/node/libs/concurrency/src/time.rs +++ b/node/libs/concurrency/src/time.rs @@ -6,6 +6,7 @@ pub type Duration = time::Duration; /// Monotonic clock time. +#[allow(deprecated)] pub type Instant = time::Instant; /// UTC time in nanoseconds precision. From 26488ec07b5fdea9a3ffbcdb76a3201b84020d7f Mon Sep 17 00:00:00 2001 From: Nacho Avecilla Date: Thu, 25 Apr 2024 10:37:51 -0300 Subject: [PATCH 22/79] Weighted attesters (#102) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add weights to the attester just like we do with the validators. ## Why ❔ To be able to give our node more importance than the other nodes, producing and finalizing blocks only if they are signed by our node. 
--- node/Cargo.lock | 6 +- node/libs/roles/src/attester/conv.rs | 82 ++++++++- .../libs/roles/src/attester/keys/signature.rs | 2 +- .../roles/src/attester/messages/l1_batch.rs | 35 ++-- node/libs/roles/src/attester/messages/msg.rs | 157 +++++++++++------- node/libs/roles/src/attester/testonly.rs | 38 ++++- node/libs/roles/src/attester/tests.rs | 136 ++++++++++++--- node/libs/roles/src/proto/attester.proto | 21 +++ node/libs/roles/src/validator/conv.rs | 17 +- .../roles/src/validator/messages/consensus.rs | 6 +- .../roles/src/validator/messages/tests.rs | 14 +- node/libs/roles/src/validator/testonly.rs | 10 +- node/libs/roles/src/validator/tests.rs | 8 +- 13 files changed, 412 insertions(+), 120 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 5317aa5b..778ffc40 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -1588,7 +1588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.48.5", ] [[package]] @@ -2503,9 +2503,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring", diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index d30002ad..d4c3d42b 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -4,7 +4,10 @@ use zksync_consensus_crypto::ByteFmt; use zksync_consensus_utils::enum_util::Variant; use zksync_protobuf::{read_required, required, ProtoFmt}; -use super::{BatchNumber, L1Batch, Msg, PublicKey, Signature, SignedBatchMsg}; +use super::{ + AggregateSignature, BatchNumber, L1Batch, L1BatchQC, Msg, MsgHash, 
PublicKey, Signature, + SignedBatchMsg, Signers, WeightedAttester, +}; impl ProtoFmt for L1Batch { type Proto = proto::L1Batch; @@ -82,3 +85,80 @@ impl ProtoFmt for Signature { } } } + +impl ProtoFmt for WeightedAttester { + type Proto = proto::WeightedAttester; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + key: read_required(&r.key).context("key")?, + weight: *required(&r.weight).context("weight")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + key: Some(self.key.build()), + weight: Some(self.weight), + } + } +} + +impl ProtoFmt for Signers { + type Proto = zksync_protobuf::proto::std::BitVector; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ProtoFmt::read(r)?)) + } + + fn build(&self) -> Self::Proto { + self.0.build() + } +} + +impl ProtoFmt for AggregateSignature { + type Proto = proto::AggregateSignature; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ByteFmt::decode(required(&r.bn254)?)?)) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + bn254: Some(self.0.encode()), + } + } +} + +impl ProtoFmt for MsgHash { + type Proto = proto::MsgHash; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ByteFmt::decode(required(&r.keccak256)?)?)) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + keccak256: Some(self.0.encode()), + } + } +} + +impl ProtoFmt for L1BatchQC { + type Proto = proto::L1BatchQc; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + message: read_required(&r.msg).context("message")?, + signers: read_required(&r.signers).context("signers")?, + signature: read_required(&r.sig).context("signature")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + msg: Some(self.message.build()), + signers: Some(self.signers.build()), + sig: Some(self.signature.build()), + } + } +} diff --git a/node/libs/roles/src/attester/keys/signature.rs b/node/libs/roles/src/attester/keys/signature.rs index 82dfa8f5..f4d5f3cb 100644 --- 
a/node/libs/roles/src/attester/keys/signature.rs +++ b/node/libs/roles/src/attester/keys/signature.rs @@ -4,7 +4,7 @@ use super::PublicKey; use std::fmt; use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; -/// A signature of an L1 batch from a validator. +/// A signature of an L1 batch from an attester. #[derive(Clone, PartialEq, Eq)] pub struct Signature(pub(crate) bn254::Signature); diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs index 22ef1aa6..f06dac60 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -9,8 +9,8 @@ use super::{SignedBatchMsg, Signers}; /// A batch number. pub struct BatchNumber(pub u64); -/// A message to send by validators to the gossip network. -/// It contains the validators signature to sign the block batches to be sent to L1. +/// A message to send by attesters to the gossip network. +/// It contains the attester signature to sign the block batches to be sent to L1. #[derive(Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct L1Batch { /// The number of the batch. @@ -18,12 +18,12 @@ pub struct L1Batch { } /// A certificate for a batch of L2 blocks to be sent to L1. -/// It contains the signatures of the validators that signed the batch. +/// It contains the signatures of the attesters that signed the batch. #[derive(Debug, Clone, Eq, PartialEq)] pub struct L1BatchQC { /// The aggregate signature of the signed L1 batches. pub signature: AggregateSignature, - /// The validators that signed this message. + /// The attesters that signed this message. pub signers: Signers, /// The message that was signed. pub message: L1Batch, @@ -39,14 +39,17 @@ pub enum L1BatchQCVerifyError { #[error("not enough signers: got {got}, want {want}")] NotEnoughSigners { /// Got signers. - got: usize, + got: u64, /// Want signers. - want: usize, + want: u64, }, + /// Bad signer set. 
+ #[error("signers set doesn't match genesis")] + BadSignersSet, } impl L1BatchQC { - /// Create a new empty instance for a given `ReplicaCommit` message and a validator set size. + /// Create a new empty instance for a given `L1Batch` message. pub fn new(message: L1Batch, genesis: &Genesis) -> Self { Self { message, @@ -73,25 +76,29 @@ impl L1BatchQC { /// Verifies the signature of the L1BatchQC. pub fn verify(&self, genesis: &Genesis) -> Result<(), L1BatchQCVerifyError> { - // Verify that we have enough signers. - let num_signers = self.signers.count(); + use L1BatchQCVerifyError as Error; + if self.signers.len() != genesis.attesters.len() { + return Err(Error::BadSignersSet); + } + // Verify the signers' weight is enough. + let weight = genesis.attesters.weight(&self.signers); let threshold = genesis.attesters.threshold(); - if num_signers < threshold { - return Err(L1BatchQCVerifyError::NotEnoughSigners { - got: num_signers, + if weight < threshold { + return Err(Error::NotEnoughSigners { + got: weight, want: threshold, }); } let messages_and_keys = genesis .attesters - .iter() + .iter_keys() .enumerate() .filter(|(i, _)| self.signers.0[*i]) .map(|(_, pk)| (self.message.clone(), pk)); self.signature .verify_messages(messages_and_keys) - .map_err(L1BatchQCVerifyError::BadSignature) + .map_err(Error::BadSignature) } } diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index 241fd630..f4f5c0a2 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -1,17 +1,16 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt, -}; +use std::{collections::BTreeMap, fmt}; use crate::{ attester::{L1Batch, PublicKey, Signature}, validator::ViewNumber, }; +use anyhow::Context; use bit_vec::BitVec; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; /// Message that is sent by an attester. 
+#[derive(Clone, Debug, PartialEq, Eq)] pub enum Msg { /// L1 batch message. L1Batch(L1Batch), @@ -46,8 +45,8 @@ pub struct SignedBatchMsg> { pub sig: Signature, } -/// Struct that represents a bit map of validators. We use it to compactly store -/// which validators signed a given message. +/// Struct that represents a bit map of attesters. We use it to compactly store +/// which attesters signed a given L1Batch message. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Signers(pub BitVec); @@ -57,13 +56,13 @@ impl Signers { Self(BitVec::from_elem(n, false)) } - /// Returns the number of signers, i.e. the number of validators that signed + /// Returns the number of signers, i.e. the number of attesters that signed /// the particular message that this signer bitmap refers to. pub fn count(&self) -> usize { self.0.iter().filter(|b| *b).count() } - /// Size of the corresponding ValidatorSet. + /// Size of the corresponding attester::Committee. pub fn len(&self) -> usize { self.0.len() } @@ -74,90 +73,127 @@ impl Signers { } } -/// A struct that represents a set of validators. It is used to store the current validator set. -/// We represent each validator by its validator public key. +/// A struct that represents a set of attesters. It is used to store the current attester set. +/// We represent each attester by its attester public key. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct AttesterSet { - vec: Vec, - map: BTreeMap, +pub struct Committee { + vec: Vec, + indexes: BTreeMap, + total_weight: u64, } -impl AttesterSet { - /// Creates a new AttesterSet from a list of validator public keys. - pub fn new(attesters: impl IntoIterator) -> anyhow::Result { - let mut set = BTreeSet::new(); +impl Committee { + /// Creates a new Committee from a list of attester public keys. 
+ pub fn new(attesters: impl IntoIterator) -> anyhow::Result { + let mut weighted_attester = BTreeMap::new(); + let mut total_weight: u64 = 0; for attester in attesters { - anyhow::ensure!(set.insert(attester), "Duplicate validator in ValidatorSet"); + anyhow::ensure!( + !weighted_attester.contains_key(&attester.key), + "Duplicated attester in attester Committee" + ); + anyhow::ensure!( + attester.weight > 0, + "Attester weight has to be a positive value" + ); + total_weight = total_weight + .checked_add(attester.weight) + .context("Sum of weights overflows in attester Committee")?; + weighted_attester.insert(attester.key.clone(), attester); } anyhow::ensure!( - !set.is_empty(), - "ValidatorSet must contain at least one validator" + !weighted_attester.is_empty(), + "Attester Committee must contain at least one attester" ); Ok(Self { - vec: set.iter().cloned().collect(), - map: set.into_iter().enumerate().map(|(i, pk)| (pk, i)).collect(), + vec: weighted_attester.values().cloned().collect(), + indexes: weighted_attester + .values() + .enumerate() + .map(|(i, v)| (v.key.clone(), i)) + .collect(), + total_weight, }) } - /// Iterates over validators. - pub fn iter(&self) -> impl Iterator { + /// Iterates over weighted attesters. + pub fn iter(&self) -> impl Iterator { self.vec.iter() } - /// Returns the number of validators. - #[allow(clippy::len_without_is_empty)] // a valid `ValidatorSet` is always non-empty by construction + /// Iterates over attester keys. + pub fn iter_keys(&self) -> impl Iterator { + self.vec.iter().map(|v| &v.key) + } + + /// Returns the number of attesters. + #[allow(clippy::len_without_is_empty)] // a valid `Committee` is always non-empty by construction pub fn len(&self) -> usize { self.vec.len() } - /// Returns true if the given validator is in the validator set. - pub fn contains(&self, validator: &PublicKey) -> bool { - self.map.contains_key(validator) + /// Returns true if the given attester is in the attester committee. 
+ pub fn contains(&self, attester: &PublicKey) -> bool { + self.indexes.contains_key(attester) } - /// Get validator by its index in the set. - pub fn get(&self, index: usize) -> Option<&PublicKey> { + /// Get attester by its index in the committee. + pub fn get(&self, index: usize) -> Option<&WeightedAttester> { self.vec.get(index) } - /// Get the index of a validator in the set. - pub fn index(&self, validator: &PublicKey) -> Option { - self.map.get(validator).copied() + /// Get the index of an attester in the committee. + pub fn index(&self, attester: &PublicKey) -> Option { + self.indexes.get(attester).copied() } - /// Computes the validator for the given view. + /// Computes the leader for the given view. pub fn view_leader(&self, view_number: ViewNumber) -> PublicKey { let index = view_number.0 as usize % self.len(); - self.get(index).unwrap().clone() + self.get(index).unwrap().key.clone() + } + + /// Signature weight threshold for this attester committee. + pub fn threshold(&self) -> u64 { + threshold(self.total_weight()) + } + + /// Compute the sum of signers' weights. + pub fn weight(&self, signers: &Signers) -> u64 { + assert_eq!(self.vec.len(), signers.len()); + self.vec + .iter() + .enumerate() + .filter(|(i, _)| signers.0[*i]) + .map(|(_, v)| v.weight) + .sum() } - /// Signature threshold for this validator set. - pub fn threshold(&self) -> usize { - threshold(self.len()) + /// Sum of all attesters' weights in the committee. + pub fn total_weight(&self) -> u64 { + self.total_weight } - /// Maximal number of faulty replicas allowed in this validator set. - pub fn faulty_replicas(&self) -> usize { - faulty_replicas(self.len()) + /// Maximal weight of faulty replicas allowed in this attester committee. + pub fn max_faulty_weight(&self) -> u64 { + max_faulty_weight(self.total_weight()) } } -/// Calculate the maximum number of faulty replicas, for a given number of replicas. 
-pub fn faulty_replicas(n: usize) -> usize { - // Calculate the allowed maximum number of faulty replicas. We want the following relationship to hold: - // n = 5*f + 1 - // for n total replicas and f faulty replicas. This results in the following formula for the maximum - // number of faulty replicas: - // f = floor((n - 1) / 5) - // Because of this, it doesn't make sense to have 5*f + 2 or 5*f + 3 replicas. It won't increase the number - // of allowed faulty replicas. - (n - 1) / 5 +/// Calculate the consensus threshold, the minimum votes' weight for any consensus action to be valid, +/// for a given committee total weight. +pub fn threshold(total_weight: u64) -> u64 { + total_weight - max_faulty_weight(total_weight) } -/// Calculate the consensus threshold, the minimum number of votes for any consensus action to be valid, -/// for a given number of replicas. -pub fn threshold(n: usize) -> usize { - n - faulty_replicas(n) +/// Calculate the maximum allowed weight for faulty replicas, for a given total weight. +pub fn max_faulty_weight(total_weight: u64) -> u64 { + // Calculate the allowed maximum weight of faulty replicas. We want the following relationship to hold: + // n = 5*f + 1 + // for n total weight and f faulty weight. This results in the following formula for the maximum + // weight of faulty replicas: + // f = floor((n - 1) / 5) + (total_weight - 1) / 5 } impl std::ops::BitOrAssign<&Self> for Signers { @@ -232,3 +268,12 @@ impl + Clone> SignedBatchMsg { }) } } + +/// Attester representation inside a Committee. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WeightedAttester { + /// Attester key + pub key: PublicKey, + /// Attester weight inside the Committee. 
+ pub weight: u64, +} diff --git a/node/libs/roles/src/attester/testonly.rs b/node/libs/roles/src/attester/testonly.rs index 362c78fe..7d7b0ea9 100644 --- a/node/libs/roles/src/attester/testonly.rs +++ b/node/libs/roles/src/attester/testonly.rs @@ -1,7 +1,8 @@ use super::{ - AggregateSignature, AttesterSet, L1Batch, Msg, MsgHash, PublicKey, SecretKey, Signature, - SignedBatchMsg, + AggregateSignature, Committee, L1Batch, L1BatchQC, Msg, MsgHash, PublicKey, SecretKey, + Signature, SignedBatchMsg, Signers, WeightedAttester, }; +use bit_vec::BitVec; use rand::{ distributions::{Distribution, Standard}, Rng, @@ -38,11 +39,14 @@ impl Distribution for Standard { } } -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> AttesterSet { +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Committee { let count = rng.gen_range(1..11); - let public_keys = (0..count).map(|_| rng.gen()); - AttesterSet::new(public_keys).unwrap() + let public_keys = (0..count).map(|_| WeightedAttester { + key: rng.gen(), + weight: 1, + }); + Committee::new(public_keys).unwrap() } } @@ -52,6 +56,28 @@ impl Distribution for Standard { } } +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> L1BatchQC { + L1BatchQC { + message: rng.gen(), + signers: rng.gen(), + signature: rng.gen(), + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Msg { + Msg::L1Batch(rng.gen()) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Signers { + Signers(BitVec::from_bytes(&rng.gen::<[u8; 4]>())) + } +} + impl Distribution for Standard { fn sample(&self, rng: &mut R) -> Signature { Signature(rng.gen()) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index ce769f3b..3a67af20 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -1,49 +1,85 @@ -use crate::validator::{testonly::Setup, Committee, Genesis}; +use crate::{ + attester, + 
validator::{testonly::Setup, Committee, Genesis}, +}; use super::*; use assert_matches::assert_matches; use rand::Rng; use zksync_concurrency::ctx; use zksync_consensus_crypto::{ByteFmt, Text, TextFmt}; -use zksync_protobuf::testonly::{test_encode, test_encode_random}; +use zksync_protobuf::testonly::test_encode_random; #[test] fn test_byte_encoding() { - let key = SecretKey::generate(); + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let sk: SecretKey = rng.gen(); assert_eq!( - key.public(), - ::decode(&ByteFmt::encode(&key)) + sk.public(), + ::decode(&ByteFmt::encode(&sk)) .unwrap() .public() ); + + let pk: PublicKey = rng.gen(); + assert_eq!(pk, ByteFmt::decode(&ByteFmt::encode(&pk)).unwrap()); + + let sig: Signature = rng.gen(); + assert_eq!(sig, ByteFmt::decode(&ByteFmt::encode(&sig)).unwrap()); + + let agg_sig: AggregateSignature = rng.gen(); assert_eq!( - key.public(), - ByteFmt::decode(&ByteFmt::encode(&key.public())).unwrap() + agg_sig, + ByteFmt::decode(&ByteFmt::encode(&agg_sig)).unwrap() ); } #[test] fn test_text_encoding() { - let key = SecretKey::generate(); - let t1 = TextFmt::encode(&key); - let t2 = TextFmt::encode(&key.public()); + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let sk: SecretKey = rng.gen(); + let t = TextFmt::encode(&sk); assert_eq!( - key.public(), - Text::new(&t1).decode::().unwrap().public() + sk.public(), + Text::new(&t).decode::().unwrap().public() ); - assert_eq!(key.public(), Text::new(&t2).decode().unwrap()); - assert!(Text::new(&t1).decode::().is_err()); - assert!(Text::new(&t2).decode::().is_err()); + + let pk: PublicKey = rng.gen(); + let t = TextFmt::encode(&pk); + assert_eq!(pk, Text::new(&t).decode::().unwrap()); + + let sig: Signature = rng.gen(); + let t = TextFmt::encode(&sig); + assert_eq!(sig, Text::new(&t).decode::().unwrap()); + + let agg_sig: AggregateSignature = rng.gen(); + let t = TextFmt::encode(&agg_sig); + assert_eq!( + agg_sig, + 
Text::new(&t).decode::().unwrap() + ); + + let msg_hash: MsgHash = rng.gen(); + let t = TextFmt::encode(&msg_hash); + assert_eq!(msg_hash, Text::new(&t).decode::().unwrap()); } #[test] fn test_schema_encoding() { - let ctx = &ctx::test_root(&ctx::RealClock); + let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); test_encode_random::>(rng); - let key = rng.gen::().public(); - test_encode(rng, &key); + test_encode_random::(rng); + test_encode_random::(rng); + test_encode_random::(rng); + test_encode_random::(rng); + test_encode_random::(rng); test_encode_random::(rng); + test_encode_random::(rng); } #[test] @@ -110,18 +146,21 @@ fn test_l1_batch_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { - version: setup1.genesis.version, validators: Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), - attesters: AttesterSet::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(), + attesters: attester::Committee::new(setup1.genesis.attesters.iter().take(3).cloned()) + .unwrap(), fork: setup1.genesis.fork.clone(), + ..Default::default() }; + let attester_weight = setup1.genesis.attesters.total_weight() / 6; for i in 0..setup1.attester_keys.len() + 1 { let mut qc = L1BatchQC::new(L1Batch::default(), &setup1.genesis); for key in &setup1.attester_keys[0..i] { qc.add(&key.sign_batch_msg(qc.message.clone()), &setup1.genesis); } - if i >= setup1.genesis.attesters.threshold() { + let expected_weight = i as u64 * attester_weight; + if expected_weight >= setup1.genesis.attesters.threshold() { assert!(qc.verify(&setup1.genesis).is_ok()); } else { assert_matches!( @@ -130,8 +169,61 @@ fn test_l1_batch_qc() { ); } - // Mismatching validator sets. + // Mismatching attesters sets. 
assert!(qc.verify(&setup2.genesis).is_err()); assert!(qc.verify(&genesis3).is_err()); } } + +#[test] +fn test_attester_committee_weights() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + // Attesters with non-uniform weights + let setup = Setup::new_with_weights(rng, vec![1000, 600, 800, 6000, 900, 700]); + // Expected sum of the attesters weights + let sums = [1000, 1600, 2400, 8400, 9300, 10000]; + + let msg = L1Batch::default(); + let mut qc = L1BatchQC::new(msg.clone(), &setup.genesis); + for (n, weight) in sums.iter().enumerate() { + let key = &setup.attester_keys[n]; + qc.add(&key.sign_batch_msg(msg.clone()), &setup.genesis); + assert_eq!(setup.genesis.attesters.weight(&qc.signers), *weight); + } +} + +#[test] +fn test_committee_weights_overflow_check() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let attesters: Vec = [u64::MAX / 5; 6] + .iter() + .map(|w| WeightedAttester { + key: rng.gen::().public(), + weight: *w, + }) + .collect(); + + // Creation should overflow + assert_matches!(attester::Committee::new(attesters), Err(_)); +} + +#[test] +fn test_committee_with_zero_weights() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let attesters: Vec = [1000, 0, 800, 6000, 0, 700] + .iter() + .map(|w| WeightedAttester { + key: rng.gen::().public(), + weight: *w, + }) + .collect(); + + // Committee creation should error on zero weight attesters + assert_matches!(attester::Committee::new(attesters), Err(_)); +} diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto index 1e315d29..9bedebff 100644 --- a/node/libs/roles/src/proto/attester.proto +++ b/node/libs/roles/src/proto/attester.proto @@ -2,10 +2,18 @@ syntax = "proto3"; package zksync.roles.attester; +import "zksync/std.proto"; + message L1Batch { optional uint64 number = 1; // required } +message L1BatchQC { + optional L1Batch msg = 1; // required + optional std.BitVector 
signers = 2; // required + optional AggregateSignature sig = 3; // required +} + message Msg { oneof t { // required L1Batch l1_batch = 4; @@ -25,3 +33,16 @@ message PublicKey { message Signature { optional bytes bn254 = 1; // required } + +message WeightedAttester { + optional PublicKey key = 1; // required + optional uint64 weight = 2; // required +} + +message AggregateSignature { + optional bytes bn254 = 1; // required +} + +message MsgHash { + optional bytes keccak256 = 1; // required +} diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 2ddef725..98f6043f 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,5 +1,5 @@ use crate::{ - attester::{self, AttesterSet}, + attester::{self, WeightedAttester}, node::SessionId, }; @@ -71,14 +71,19 @@ impl ProtoFmt for Genesis { .attesters .iter() .enumerate() - .map(|(i, v)| attester::PublicKey::read(v).context(i)) + .map(|(i, v)| { + anyhow::Ok(WeightedAttester { + key: attester::PublicKey::read(v).context(i)?, + weight: 1, + }) + }) .collect::>() - .context("validators")?; + .context("attesters")?; Ok(Self { fork: read_required(&r.fork).context("fork")?, validators: Committee::new(validators.into_iter()).context("validators")?, - attesters: AttesterSet::new(attesters.into_iter()).context("attesters")?, + attesters: attester::Committee::new(attesters.into_iter()).context("attesters")?, version, }) } @@ -87,14 +92,14 @@ impl ProtoFmt for Genesis { GenesisVersion(0) => Self::Proto { fork: Some(self.fork.build()), validators: self.validators.iter().map(|v| v.key.build()).collect(), - attesters: self.attesters.iter().map(|v| v.build()).collect(), + attesters: self.attesters.iter().map(|v| v.key.build()).collect(), validators_v1: vec![], }, GenesisVersion(1..) 
=> Self::Proto { fork: Some(self.fork.build()), validators: vec![], validators_v1: self.validators.iter().map(|v| v.build()).collect(), - attesters: self.attesters.iter().map(|v| v.build()).collect(), + attesters: self.attesters.iter().map(|v| v.key.build()).collect(), }, } } diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index 0123c9f9..4eddc5f0 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -1,6 +1,6 @@ //! Messages related to the consensus protocol. use super::{BlockNumber, LeaderCommit, LeaderPrepare, Msg, ReplicaCommit, ReplicaPrepare}; -use crate::{attester::AttesterSet, validator}; +use crate::{attester, validator}; use anyhow::Context; use bit_vec::BitVec; use std::{collections::BTreeMap, fmt}; @@ -213,7 +213,7 @@ pub struct Genesis { /// Set of validators of the chain. pub validators: Committee, /// Set of attesters of the chain. - pub attesters: AttesterSet, + pub attesters: attester::Committee, /// Fork of the chain to follow. 
pub fork: Fork, } @@ -234,7 +234,7 @@ impl Default for Genesis { Self { version: GenesisVersion::CURRENT, validators: Committee::default(), - attesters: AttesterSet::default(), + attesters: attester::Committee::default(), fork: Fork::default(), } } diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index 5383bc90..3f2d0342 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -1,4 +1,4 @@ -use crate::attester::{self, AttesterSet}; +use crate::attester::{self, WeightedAttester}; use crate::validator::*; use zksync_consensus_crypto::Text; use zksync_consensus_utils::enum_util::Variant as _; @@ -50,7 +50,11 @@ fn genesis_v0() -> Genesis { weight: 1, })) .unwrap(), - attesters: AttesterSet::new(attester_keys().iter().map(|k| k.public())).unwrap(), + attesters: attester::Committee::new(attester_keys().iter().map(|k| WeightedAttester { + key: k.public(), + weight: 1, + })) + .unwrap(), fork: fork(), version: GenesisVersion(0), } @@ -64,7 +68,11 @@ fn genesis_v1() -> Genesis { weight: 1, })) .unwrap(), - attesters: AttesterSet::new(attester_keys().iter().map(|k| k.public())).unwrap(), + attesters: attester::Committee::new(attester_keys().iter().map(|k| WeightedAttester { + key: k.public(), + weight: 1, + })) + .unwrap(), fork: fork(), version: GenesisVersion(1), } diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 1796a50b..9ced7a8c 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,5 +1,5 @@ //! Test-only utilities. 
-use crate::attester::{self, AttesterSet}; +use crate::attester::{self, WeightedAttester}; use super::{ AggregateSignature, BlockHeader, BlockNumber, CommitQC, Committee, ConsensusMsg, FinalBlock, @@ -35,7 +35,13 @@ impl Setup { } })) .unwrap(), - attesters: AttesterSet::new(attester_keys.iter().map(|k| k.public())).unwrap(), + attesters: attester::Committee::new(attester_keys.iter().enumerate().map(|(i, k)| { + WeightedAttester { + key: k.public(), + weight: weights[i], + } + })) + .unwrap(), fork, ..Default::default() }; diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index c2e3950d..a62e06f5 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -1,5 +1,5 @@ use super::*; -use crate::{attester::AttesterSet, validator::testonly::Setup}; +use crate::{attester, validator::testonly::Setup}; use assert_matches::assert_matches; use rand::{seq::SliceRandom, Rng}; use std::vec; @@ -205,7 +205,8 @@ fn test_commit_qc() { let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { validators: Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), - attesters: AttesterSet::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(), + attesters: attester::Committee::new(setup1.genesis.attesters.iter().take(3).cloned()) + .unwrap(), fork: setup1.genesis.fork.clone(), ..Default::default() }; @@ -244,7 +245,8 @@ fn test_prepare_qc() { let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { validators: Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), - attesters: AttesterSet::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(), + attesters: attester::Committee::new(setup1.genesis.attesters.iter().take(3).cloned()) + .unwrap(), fork: setup1.genesis.fork.clone(), ..Default::default() }; From 4e32fc0e3a98542f794f964af242a98823e52195 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 26 Apr 2024 19:49:57 -0300 Subject: [PATCH 
23/79] l1 batch signature watcher for nodes in the gossip net --- .../network/src/gossip/batch_signatures.rs | 127 ++++++++++++++++++ node/actors/network/src/gossip/mod.rs | 1 + node/libs/roles/src/attester/conv.rs | 2 + .../roles/src/attester/messages/l1_batch.rs | 14 +- node/libs/roles/src/attester/testonly.rs | 12 +- node/libs/roles/src/attester/tests.rs | 13 +- node/libs/roles/src/proto/attester.proto | 1 + 7 files changed, 161 insertions(+), 9 deletions(-) create mode 100644 node/actors/network/src/gossip/batch_signatures.rs diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_signatures.rs new file mode 100644 index 00000000..4ce7b957 --- /dev/null +++ b/node/actors/network/src/gossip/batch_signatures.rs @@ -0,0 +1,127 @@ +//! Global state distributed by active attesters, observed by all the nodes in the network. +use crate::watch::Watch; +use std::{collections::HashSet, sync::Arc}; +use zksync_concurrency::{sync, time}; +use zksync_consensus_roles::attester::{self, BatchNumber, L1Batch}; + +/// Mapping from attester::PublicKey to a signed L1Batch message. +/// Represents the current state of the node's knowledge about attester batch signatures. +#[derive(Clone, Default, PartialEq, Eq)] +pub(crate) struct L1BatchSignatures( + pub(super) im::HashMap>>, +); + +impl L1BatchSignatures { + /// Gets a signed L1Batch message for a given key. + pub(crate) fn get( + &self, + key: &attester::PublicKey, + ) -> Option<&Arc>> { + self.0.get(key) + } + + // /// Returns a set of entries of `self` which are newer than the entries in `b`. + // pub(super) fn get_newer(&self, b: &Self) -> Vec>> { + // let mut newer = vec![]; + // for (k, v) in &self.0 { + // if let Some(bv) = b.0.get(k) { + // if !v.msg.is_newer(&bv.msg) { + // continue; + // } + // } + // newer.push(v.clone()); + // } + // newer + // } + + /// Updates the discovery map with entries from `data`. + /// It exits as soon as an invalid entry is found. 
+ /// `self` might get modified even if an error is returned + /// (all entries verified so far are added). + /// Returns true iff some new entry was added. + pub(super) fn update( + &mut self, + attesters: &attester::Committee, + data: &[Arc>], + ) -> anyhow::Result { + let mut changed = false; + + let mut done = HashSet::new(); + for d in data { + // Disallow multiple entries for the same key: + // it is important because a malicious validator may spam us with + // new versions and verifying signatures is expensive. + if done.contains(&d.key) { + anyhow::bail!("duplicate entry for {:?}", d.key); + } + done.insert(d.key.clone()); + if !attesters.contains(&d.key) { + // We just skip the entries we are not interested in. + // For now the set of validators is static, so we could treat this as an error, + // however we eventually want the validator set to be dynamic. + continue; + } + if let Some(x) = self.0.get(&d.key) { + if !d.msg.is_newer(&x.msg) { + continue; + } + } + d.verify()?; + self.0.insert(d.key.clone(), d.clone()); + changed = true; + } + Ok(changed) + } +} + +/// Watch wrapper of ValidatorAddrs, +/// which supports subscribing to ValidatorAddr updates. +pub(crate) struct L1BatchSignaturesWatch(Watch); + +impl Default for L1BatchSignaturesWatch { + fn default() -> Self { + Self(Watch::new(L1BatchSignatures::default())) + } +} + +impl L1BatchSignaturesWatch { + /// Subscribes to ValidatorAddrs updates. + pub(crate) fn subscribe(&self) -> sync::watch::Receiver { + self.0.subscribe() + } + + /// Inserts a new version of the announcement signed with the given key. 
+ pub(crate) async fn announce( + &self, + key: &attester::SecretKey, + batch_number: BatchNumber, + timestamp: time::Utc, + ) { + let this = self.0.lock().await; + let mut signatures = this.borrow().clone(); + let d = Arc::new(key.sign_batch_msg(L1Batch { + number: batch_number, + timestamp: timestamp, + })); + signatures.0.insert(d.key.clone(), d); + this.send_replace(signatures); + } + + /// Inserts data to L1BatchSignatures. + /// Subscribers are notified iff at least 1 new entry has + /// been inserted. Returns an error iff an invalid + /// entry in `data` has been found. The provider of the + /// invalid entry should be banned. + pub(crate) async fn update( + &self, + attesters: &attester::Committee, + data: &[Arc>], + ) -> anyhow::Result<()> { + let this = self.0.lock().await; + let mut signatures = this.borrow().clone(); + if signatures.update(attesters, data)? { + this.send_replace(signatures); + } + Ok(()) + } +} diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index e8bf588c..32b2470e 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -19,6 +19,7 @@ use zksync_concurrency::{ctx, ctx::channel, scope, sync}; use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::BlockStore; +mod batch_signatures; mod fetch; mod handshake; mod runner; diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index d4c3d42b..e573f69e 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -14,11 +14,13 @@ impl ProtoFmt for L1Batch { fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { number: BatchNumber(*required(&r.number).context("number")?), + timestamp: read_required(&r.timestamp).context("timestamp")?, }) } fn build(&self) -> Self::Proto { Self::Proto { number: Some(self.number.0), + timestamp: Some(self.timestamp.build()), } } } diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs
b/node/libs/roles/src/attester/messages/l1_batch.rs index f06dac60..e2f43ed2 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -1,3 +1,5 @@ +use zksync_concurrency::time; + use crate::{ attester::{self, AggregateSignature}, validator::Genesis, @@ -5,16 +7,18 @@ use crate::{ use super::{SignedBatchMsg, Signers}; -#[derive(Clone, Debug, PartialEq, Eq, Hash, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Default, PartialOrd)] /// A batch number. pub struct BatchNumber(pub u64); /// A message to send by attesters to the gossip network. /// It contains the attester signature to sign the block batches to be sent to L1. -#[derive(Clone, Debug, PartialEq, Eq, Hash, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct L1Batch { /// The number of the batch. pub number: BatchNumber, + /// Time at which this message has been signed. + pub timestamp: time::Utc, } /// A certificate for a batch of L2 blocks to be sent to L1. @@ -48,6 +52,12 @@ pub enum L1BatchQCVerifyError { BadSignersSet, } +impl L1Batch { + pub fn is_newer(&self, b: &Self) -> bool { + (&self.number, self.timestamp) > (&b.number, b.timestamp) + } +} + impl L1BatchQC { /// Create a new empty instance for a given `L1Batch` message. 
pub fn new(message: L1Batch, genesis: &Genesis) -> Self { diff --git a/node/libs/roles/src/attester/testonly.rs b/node/libs/roles/src/attester/testonly.rs index 7d7b0ea9..4a6e1859 100644 --- a/node/libs/roles/src/attester/testonly.rs +++ b/node/libs/roles/src/attester/testonly.rs @@ -1,6 +1,6 @@ use super::{ - AggregateSignature, Committee, L1Batch, L1BatchQC, Msg, MsgHash, PublicKey, SecretKey, - Signature, SignedBatchMsg, Signers, WeightedAttester, + AggregateSignature, BatchNumber, Committee, L1Batch, L1BatchQC, Msg, MsgHash, PublicKey, + SecretKey, Signature, SignedBatchMsg, Signers, WeightedAttester, }; use bit_vec::BitVec; use rand::{ @@ -8,6 +8,7 @@ use rand::{ Rng, }; use std::sync::Arc; +use zksync_concurrency::time; use zksync_consensus_utils::enum_util::Variant; impl AggregateSignature { @@ -51,8 +52,11 @@ impl Distribution for Standard { } impl Distribution for Standard { - fn sample(&self, _rng: &mut R) -> L1Batch { - L1Batch::default() + fn sample(&self, rng: &mut R) -> L1Batch { + L1Batch { + number: BatchNumber(rng.gen()), + timestamp: time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), + } } } diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index 3a67af20..dc442aa8 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -6,7 +6,7 @@ use crate::{ use super::*; use assert_matches::assert_matches; use rand::Rng; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, time}; use zksync_consensus_crypto::{ByteFmt, Text, TextFmt}; use zksync_protobuf::testonly::test_encode_random; @@ -137,6 +137,13 @@ fn test_agg_signature_verify() { .is_err()); } +fn make_l1_batch_msg(rng: &mut impl Rng) -> L1Batch { + L1Batch { + number: BatchNumber(rng.gen()), + timestamp: time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), + } +} + #[test] fn test_l1_batch_qc() { use L1BatchQCVerifyError as Error; @@ -155,7 +162,7 @@ fn 
test_l1_batch_qc() { let attester_weight = setup1.genesis.attesters.total_weight() / 6; for i in 0..setup1.attester_keys.len() + 1 { - let mut qc = L1BatchQC::new(L1Batch::default(), &setup1.genesis); + let mut qc = L1BatchQC::new(make_l1_batch_msg(rng), &setup1.genesis); for key in &setup1.attester_keys[0..i] { qc.add(&key.sign_batch_msg(qc.message.clone()), &setup1.genesis); } @@ -185,7 +192,7 @@ fn test_attester_committee_weights() { // Expected sum of the attesters weights let sums = [1000, 1600, 2400, 8400, 9300, 10000]; - let msg = L1Batch::default(); + let msg = make_l1_batch_msg(rng); let mut qc = L1BatchQC::new(msg.clone(), &setup.genesis); for (n, weight) in sums.iter().enumerate() { let key = &setup.attester_keys[n]; diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto index 9bedebff..8276c6a4 100644 --- a/node/libs/roles/src/proto/attester.proto +++ b/node/libs/roles/src/proto/attester.proto @@ -6,6 +6,7 @@ import "zksync/std.proto"; message L1Batch { optional uint64 number = 1; // required + optional std.Timestamp timestamp = 2; // required } message L1BatchQC { From 7c1ab8a84df196299a037e97180dc8fa0f831e71 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Apr 2024 11:16:13 -0300 Subject: [PATCH 24/79] New push signature request for full nodes in the gossip network --- node/actors/network/src/rpc/push_signature.rs | 42 +++++++++++++++ node/actors/network/src/rpc/signature.rs | 51 ------------------- 2 files changed, 42 insertions(+), 51 deletions(-) create mode 100644 node/actors/network/src/rpc/push_signature.rs delete mode 100644 node/actors/network/src/rpc/signature.rs diff --git a/node/actors/network/src/rpc/push_signature.rs b/node/actors/network/src/rpc/push_signature.rs new file mode 100644 index 00000000..ab0c077f --- /dev/null +++ b/node/actors/network/src/rpc/push_signature.rs @@ -0,0 +1,42 @@ +//! Defines RPC for passing consensus messages. 
+use std::sync::Arc; + +use crate::{mux, proto::gossip as proto}; +use anyhow::Context; +use zksync_consensus_roles::attester::{self, L1Batch}; +use zksync_protobuf::ProtoFmt; + +/// Signature RPC. +pub(crate) struct Rpc; + +impl super::Rpc for Rpc { + const CAPABILITY_ID: mux::CapabilityId = 5; + const INFLIGHT: u32 = 1; + const METHOD: &'static str = "push_signature"; + type Req = Req; + type Resp = (); +} + +/// Signed batch messages that the receiving peer should process. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct Req(pub(crate) Vec>>); + +impl ProtoFmt for Req { + type Proto = proto::PushSignature; + + fn read(r: &Self::Proto) -> anyhow::Result { + let mut signatures = vec![]; + for (i, e) in r.signatures.iter().enumerate() { + signatures.push(Arc::new( + ProtoFmt::read(e).with_context(|| format!("signatures[{i}]"))?, + )); + } + Ok(Self(signatures)) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + signatures: self.0.iter().map(|a| ProtoFmt::build(a.as_ref())).collect(), + } + } +} diff --git a/node/actors/network/src/rpc/signature.rs b/node/actors/network/src/rpc/signature.rs deleted file mode 100644 index 9145b3cc..00000000 --- a/node/actors/network/src/rpc/signature.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! Defines RPC for passing consensus messages. -use crate::{consensus::Network, mux, proto::consensus as proto}; -use zksync_consensus_roles::attester::{self, L1Batch}; -use zksync_protobuf::{read_required, ProtoFmt}; - -/// Signature RPC. -pub(crate) struct Rpc; - -impl super::Rpc for Rpc { - const CAPABILITY_ID: mux::CapabilityId = 5; - const INFLIGHT: u32 = 1; - const METHOD: &'static str = "signature"; - type Req = Req; - type Resp = (); -} - -/// RPC server for the L1 batch messages. -pub(crate) struct L1BatchServer<'a>(pub(crate) &'a Network); -/// Signed consensus message that the receiving peer should process.
-#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct Req(pub(crate) attester::SignedBatchMsg); - -/// Confirmation that the signature message has been processed. -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct Resp; - -impl ProtoFmt for Req { - type Proto = proto::SignatureReq; - - fn read(r: &Self::Proto) -> anyhow::Result { - read_required(&r.msg).map(Self) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - msg: Some(self.0.build()), - } - } -} - -impl ProtoFmt for Resp { - type Proto = proto::SignatureResp; - - fn read(_r: &Self::Proto) -> anyhow::Result { - Ok(Self) - } - - fn build(&self) -> Self::Proto { - Self::Proto {} - } -} From dd912d13ab35afc61ecbd0308e12a41602cabb52 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Apr 2024 11:19:50 -0300 Subject: [PATCH 25/79] Update gossip proto file --- node/actors/network/src/proto/gossip.proto | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index db219151..4c306e68 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -4,6 +4,7 @@ package zksync.network.gossip; import "zksync/roles/node.proto"; import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; // First message exchanged in the encrypted session. message Handshake { @@ -17,6 +18,11 @@ message PushValidatorAddrs { repeated roles.validator.Signed net_addresses = 1; } +message PushSignature { + // Signed roles.attester.L1Batch messages. + repeated roles.attester.SignedBatch signatures = 1; +} + // State of the local block store. // A node is expected to store a continuous range of blocks at all times // and actively fetch newest blocks.
From 182adcac7d5de7d8c954f087377a060322f8c23a Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Apr 2024 11:20:20 -0300 Subject: [PATCH 26/79] Add push signature watcher to the runner of the gossip network --- node/actors/network/src/config.rs | 4 +- node/actors/network/src/consensus/mod.rs | 23 +------- .../network/src/gossip/batch_signatures.rs | 26 ++++----- node/actors/network/src/gossip/mod.rs | 5 ++ node/actors/network/src/gossip/runner.rs | 55 ++++++++++++++++++- node/actors/network/src/rpc/mod.rs | 2 +- 6 files changed, 74 insertions(+), 41 deletions(-) diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index 9631c531..effe3c90 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -21,7 +21,7 @@ pub struct RpcConfig { /// Max rate of sending/receiving consensus messages. pub consensus_rate: limiter::Rate, /// Max rate of sending/receiving l1 batch signature messages. - pub l1_batch_rate: limiter::Rate, + pub push_l1_batch_signature_rate: limiter::Rate, } impl Default for RpcConfig { @@ -44,7 +44,7 @@ impl Default for RpcConfig { burst: 10, refresh: time::Duration::ZERO, }, - l1_batch_rate: limiter::Rate { + push_l1_batch_signature_rate: limiter::Rate { burst: 10, refresh: time::Duration::ZERO, }, diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 074264fd..8bc3b659 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -4,7 +4,7 @@ use crate::{ config, gossip, io, noise, pool::PoolWatch, preface, - rpc::{self, signature::L1BatchServer}, + rpc::{self}, }; use anyhow::Context as _; use rand::seq::SliceRandom; @@ -14,7 +14,6 @@ use std::{ }; use tracing::Instrument as _; use zksync_concurrency::{ctx, oneshot, scope, sync, time}; -use zksync_consensus_roles::attester::L1BatchQC; use zksync_consensus_roles::validator::{self}; use zksync_protobuf::kB; @@ -123,8 +122,6 @@ pub(crate) struct 
Network { pub(crate) inbound: PoolWatch, /// Set of the currently open outbound connections. pub(crate) outbound: PoolWatch, - /// Last L1 batch QC. - pub(crate) l1_batch_qc: Option, /// Messages to be sent to validators. pub(crate) msg_pool: MsgPool, } @@ -155,23 +152,6 @@ impl rpc::Handler for &Network { } } -#[async_trait::async_trait] -impl rpc::Handler for &L1BatchServer<'_> { - /// Here we bound the buffering of incoming consensus messages. - fn max_req_size(&self) -> usize { - self.0.gossip.cfg.max_block_size.saturating_add(kB) - } - - async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::signature::Req) -> anyhow::Result<()> { - let genesis = self.0.gossip.genesis(); - // FIXME Remove unwrap and find a way to handle the QC. - let qc = self.0.l1_batch_qc.as_ref().context("no L1BatchQC")?; - qc.verify(genesis).unwrap(); - qc.clone().add(&req.0, genesis); - return Ok(()); - } -} - impl Network { /// Constructs a new consensus network state. pub(crate) fn new(gossip: Arc) -> Option> { @@ -181,7 +161,6 @@ impl Network { key, inbound: PoolWatch::new(validators.clone(), 0), outbound: PoolWatch::new(validators.clone(), 0), - l1_batch_qc: None, gossip, msg_pool: MsgPool::new(), })) diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_signatures.rs index 4ce7b957..41955707 100644 --- a/node/actors/network/src/gossip/batch_signatures.rs +++ b/node/actors/network/src/gossip/batch_signatures.rs @@ -20,19 +20,19 @@ impl L1BatchSignatures { self.0.get(key) } - // /// Returns a set of entries of `self` which are newer than the entries in `b`. - // pub(super) fn get_newer(&self, b: &Self) -> Vec>> { - // let mut newer = vec![]; - // for (k, v) in &self.0 { - // if let Some(bv) = b.0.get(k) { - // if !v.msg.is_newer(&bv.msg) { - // continue; - // } - // } - // newer.push(v.clone()); - // } - // newer - // } + /// Returns a set of entries of `self` which are newer than the entries in `b`. 
+ pub(super) fn get_newer(&self, b: &Self) -> Vec>> { + let mut newer = vec![]; + for (k, v) in &self.0 { + if let Some(bv) = b.0.get(k) { + if !v.msg.is_newer(&bv.msg) { + continue; + } + } + newer.push(v.clone()); + } + newer + } /// Updates the discovery map with entries from `data`. /// It exits as soon as an invalid entry is found. diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 32b2470e..0e03ae9e 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -19,6 +19,8 @@ use zksync_concurrency::{ctx, ctx::channel, scope, sync}; use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::BlockStore; +use self::batch_signatures::L1BatchSignaturesWatch; + mod batch_signatures; mod fetch; mod handshake; @@ -39,6 +41,8 @@ pub(crate) struct Network { pub(crate) outbound: PoolWatch, /// Current state of knowledge about validators' endpoints. pub(crate) validator_addrs: ValidatorAddrsWatch, + /// Current state of knowledge about batch signatures. + pub(crate) batch_signatures: L1BatchSignaturesWatch, /// Block store to serve `get_block` requests from. pub(crate) block_store: Arc, /// Output pipe of the network actor. 
@@ -64,6 +68,7 @@ impl Network { ), outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), validator_addrs: ValidatorAddrsWatch::default(), + batch_signatures: L1BatchSignaturesWatch::default(), cfg, fetch_queue: fetch::Queue::default(), block_store, diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 10e06755..bc81d0b4 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -1,5 +1,8 @@ -use super::{handshake, Network, ValidatorAddrs}; -use crate::{noise, preface, rpc}; +use super::{batch_signatures::L1BatchSignatures, handshake, Network, ValidatorAddrs}; +use crate::{ + noise, preface, + rpc::{self}, +}; use anyhow::Context as _; use async_trait::async_trait; use rand::seq::SliceRandom; @@ -26,7 +29,25 @@ impl rpc::Handler for PushValidatorAddrsServer<' .fetch_add(1, Ordering::SeqCst); self.0 .validator_addrs - .update(&self.0.genesis().validators, &req.0[..]) + .update(&self.0.genesis().validators, &req.0) + .await?; + Ok(()) + } +} + +struct L1BatchServer<'a>(&'a Network); + +#[async_trait::async_trait] +impl rpc::Handler for L1BatchServer<'_> { + /// Here we bound the buffering of incoming consensus messages. 
+ fn max_req_size(&self) -> usize { + 100 * kB + } + + async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::push_signature::Req) -> anyhow::Result<()> { + self.0 + .batch_signatures + .update(&self.0.genesis().attesters, &req.0) .await?; Ok(()) } @@ -92,6 +113,11 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); + let push_signature_client = rpc::Client::::new( + ctx, + self.cfg.rpc.push_l1_batch_signature_rate, + ); + let push_signature_server = L1BatchServer(self); let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); @@ -103,6 +129,12 @@ impl Network { push_validator_addrs_server, self.cfg.rpc.push_validator_addrs_rate, ) + .add_client(&push_signature_client) + .add_server( + ctx, + push_signature_server, + self.cfg.rpc.push_l1_batch_signature_rate, + ) .add_client(&push_block_store_state_client) .add_server( ctx, @@ -152,6 +184,23 @@ impl Network { } }); + // Push L1 batch signatures updates to peer. + s.spawn::<()>(async { + let mut old = L1BatchSignatures::default(); + let mut sub = self.batch_signatures.subscribe(); + sub.mark_changed(); + loop { + let new = sync::changed(ctx, &mut sub).await?.clone(); + let diff = new.get_newer(&old); + if diff.is_empty() { + continue; + } + old = new; + let req = rpc::push_signature::Req(diff); + push_signature_client.call(ctx, &req, kB).await?; + } + }); + // Perform get_block calls to peer. 
s.spawn::<()>(async { let state = &mut push_block_store_state_server.state.subscribe(); diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs index de7b9d4a..e30c4808 100644 --- a/node/actors/network/src/rpc/mod.rs +++ b/node/actors/network/src/rpc/mod.rs @@ -26,8 +26,8 @@ pub(crate) mod get_block; mod metrics; pub(crate) mod ping; pub(crate) mod push_block_store_state; +pub(crate) mod push_signature; pub(crate) mod push_validator_addrs; -pub(crate) mod signature; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] From d6a5f80a78c2f977b723cf5e274291d0cfc562c9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Apr 2024 12:16:52 -0300 Subject: [PATCH 27/79] Fix lints --- .../network/src/gossip/batch_signatures.rs | 50 +++++++++---------- .../roles/src/attester/messages/l1_batch.rs | 1 + 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_signatures.rs index 41955707..493d66c7 100644 --- a/node/actors/network/src/gossip/batch_signatures.rs +++ b/node/actors/network/src/gossip/batch_signatures.rs @@ -1,8 +1,8 @@ //! Global state distributed by active validators, observed by all the nodes in the network. use crate::watch::Watch; use std::{collections::HashSet, sync::Arc}; -use zksync_concurrency::{sync, time}; -use zksync_consensus_roles::attester::{self, BatchNumber, L1Batch}; +use zksync_concurrency::sync; +use zksync_consensus_roles::attester::{self, L1Batch}; /// Mapping from validator::PublicKey to a signed validator::NetAddress. /// Represents the currents state of node's knowledge about the validator endpoints. @@ -12,13 +12,13 @@ pub(crate) struct L1BatchSignatures( ); impl L1BatchSignatures { - /// Gets a NetAddress for a given key. - pub(crate) fn get( - &self, - key: &attester::PublicKey, - ) -> Option<&Arc>> { - self.0.get(key) - } + // /// Gets a NetAddress for a given key. 
+ // pub(crate) fn get( + // &self, + // key: &attester::PublicKey, + // ) -> Option<&Arc>> { + // self.0.get(key) + // } /// Returns a set of entries of `self` which are newer than the entries in `b`. pub(super) fn get_newer(&self, b: &Self) -> Vec>> { @@ -90,22 +90,22 @@ impl L1BatchSignaturesWatch { self.0.subscribe() } - /// Inserts a new version of the announcement signed with the given key. - pub(crate) async fn announce( - &self, - key: &attester::SecretKey, - batch_number: BatchNumber, - timestamp: time::Utc, - ) { - let this = self.0.lock().await; - let mut signatures = this.borrow().clone(); - let d = Arc::new(key.sign_batch_msg(L1Batch { - number: batch_number, - timestamp: timestamp, - })); - signatures.0.insert(d.key.clone(), d); - this.send_replace(signatures); - } + // /// Inserts a new version of the announcement signed with the given key. + // pub(crate) async fn announce( + // &self, + // key: &attester::SecretKey, + // batch_number: BatchNumber, + // timestamp: time::Utc, + // ) { + // let this = self.0.lock().await; + // let mut signatures = this.borrow().clone(); + // let d = Arc::new(key.sign_batch_msg(L1Batch { + // number: batch_number, + // timestamp, + // })); + // signatures.0.insert(d.key.clone(), d); + // this.send_replace(signatures); + // } /// Inserts data to ValidatorAddrs. /// Subscribers are notified iff at least 1 new entry has diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs index e2f43ed2..2118612c 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -53,6 +53,7 @@ pub enum L1BatchQCVerifyError { } impl L1Batch { + /// Checks if `self` is a newer version than `b`. 
pub fn is_newer(&self, b: &Self) -> bool { (&self.number, self.timestamp) > (&b.number, b.timestamp) } From 1359e540d175ceb519e48a5d08518ae5ed9edd7e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Apr 2024 18:02:23 -0300 Subject: [PATCH 28/79] Add attester key to the config --- node/actors/executor/src/lib.rs | 20 +++++++++-- node/actors/executor/src/tests.rs | 2 ++ node/actors/network/src/config.rs | 5 ++- node/actors/network/src/testonly.rs | 48 +++++++++++++++------------ node/tools/src/bin/deployer.rs | 2 ++ node/tools/src/bin/localnet_config.rs | 2 ++ node/tools/src/config.rs | 7 +++- node/tools/src/proto/mod.proto | 3 ++ node/tools/src/tests.rs | 1 + 9 files changed, 65 insertions(+), 25 deletions(-) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 758f587c..21fe030a 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -9,7 +9,7 @@ use std::{ use zksync_concurrency::{ctx, limiter, net, scope, time}; use zksync_consensus_bft as bft; use zksync_consensus_network as network; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::{attester, node, validator}; use zksync_consensus_storage::{BlockStore, ReplicaStore}; use zksync_consensus_utils::pipe; use zksync_protobuf::kB; @@ -28,6 +28,12 @@ pub struct Validator { pub payload_manager: Box, } +/// Attester-related part of [`Executor`]. +pub struct Attester { + /// Attester key. + pub key: attester::SecretKey, +} + impl fmt::Debug for Validator { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("ValidatorExecutor") @@ -36,6 +42,14 @@ impl fmt::Debug for Validator { } } +impl fmt::Debug for Attester { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("AttesterExecutor") + .field("key", &self.key) + .finish() + } +} + /// Config of the node executor.
#[derive(Clone, Debug)] pub struct Config { @@ -47,7 +61,6 @@ pub struct Config { pub public_addr: net::Host, /// Maximal size of the block payload. pub max_payload_size: usize, - /// Key of this node. It uniquely identifies the node. /// It should match the secret key provided in the `node_key` file. pub node_key: node::SecretKey, @@ -82,6 +95,8 @@ pub struct Executor { pub block_store: Arc, /// Validator-specific node data. pub validator: Option, + /// Attester-specific node data. + pub attester: Option, } impl Executor { @@ -92,6 +107,7 @@ impl Executor { public_addr: self.config.public_addr.clone(), gossip: self.config.gossip(), validator_key: self.validator.as_ref().map(|v| v.key.clone()), + attester_key: self.attester.as_ref().map(|v| v.key.clone()), ping_timeout: Some(time::Duration::seconds(10)), max_block_size: self.config.max_payload_size.saturating_add(kB), max_block_queue_size: 20, diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index ebae04fb..180f3e3e 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -35,6 +35,7 @@ fn validator( replica_store: Box::new(replica_store), payload_manager: Box::new(bft::testonly::RandomPayload(1000)), }), + attester: None, } } @@ -43,6 +44,7 @@ fn fullnode(cfg: &network::Config, block_store: Arc) -> Executor { config: config(cfg), block_store, validator: None, + attester: None, } } diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index effe3c90..e7c21cf9 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -1,7 +1,7 @@ //! Network actor configs. use std::collections::{HashMap, HashSet}; use zksync_concurrency::{limiter, net, time}; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::{attester, node, validator}; /// How often we should retry to establish a connection to a validator. 
/// TODO(gprusak): once it becomes relevant, choose a more appropriate retry strategy. @@ -83,6 +83,9 @@ pub struct Config { /// Private key of the validator. /// None if the node is NOT a validator. pub validator_key: Option, + /// Private key of the attester. + /// None if the node is NOT an attester. + pub attester_key: Option, /// Maximal size of the proto-encoded `validator::FinalBlock` in bytes. pub max_block_size: usize, /// If a peer doesn't respond to a ping message within `ping_timeout`, diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index cf413c50..de8fb5ea 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -77,27 +77,32 @@ pub fn new_configs( setup: &validator::testonly::Setup, gossip_peers: usize, ) -> Vec { - let configs = setup.validator_keys.iter().map(|key| { - let addr = net::tcp::testonly::reserve_listener(); - Config { - server_addr: addr, - public_addr: (*addr).into(), - // Pings are disabled in tests by default to avoid dropping connections - // due to timeouts. - ping_timeout: None, - validator_key: Some(key.clone()), - gossip: GossipConfig { - key: rng.gen(), - dynamic_inbound_limit: usize::MAX, - static_inbound: HashSet::default(), - static_outbound: HashMap::default(), - }, - max_block_size: usize::MAX, - tcp_accept_rate: limiter::Rate::INF, - rpc: RpcConfig::default(), - max_block_queue_size: 10, - } - }); + let configs = setup + .validator_keys + .iter() + .zip(setup.attester_keys.iter()) + .map(|(validator_key, attester_key)| { + let addr = net::tcp::testonly::reserve_listener(); + Config { + server_addr: addr, + public_addr: (*addr).into(), + // Pings are disabled in tests by default to avoid dropping connections + // due to timeouts. 
+ ping_timeout: None, + validator_key: Some(validator_key.clone()), + attester_key: Some(attester_key.clone()), + gossip: GossipConfig { + key: rng.gen(), + dynamic_inbound_limit: usize::MAX, + static_inbound: HashSet::default(), + static_outbound: HashMap::default(), + }, + max_block_size: usize::MAX, + tcp_accept_rate: limiter::Rate::INF, + rpc: RpcConfig::default(), + max_block_queue_size: 10, + } + }); let mut cfgs: Vec<_> = configs.collect(); let n = cfgs.len(); @@ -123,6 +128,7 @@ pub fn new_fullnode(rng: &mut impl Rng, peer: &Config) -> Config { // due to timeouts. ping_timeout: None, validator_key: None, + attester_key: None, gossip: GossipConfig { key: rng.gen(), dynamic_inbound_limit: usize::MAX, diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 609f2628..634481d5 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -30,6 +30,7 @@ fn generate_consensus_nodes(nodes: usize, seed_nodes_amount: Option) -> V let setup = validator::testonly::Setup::new(rng, nodes); let validator_keys = setup.validator_keys.clone(); + let attester_keys = setup.attester_keys.clone(); // Each node will have `gossip_peers` outbound peers. 
let peers = 2; @@ -47,6 +48,7 @@ fn generate_consensus_nodes(nodes: usize, seed_nodes_amount: Option) -> V genesis: setup.genesis.clone(), max_payload_size: 1000000, validator_key: Some(validator_keys[i].clone()), + attester_key: Some(attester_keys[i].clone()), node_key: node_keys[i].clone(), gossip_dynamic_inbound_limit: 2, gossip_static_inbound: [].into(), diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 18d3c00e..3c5c2a39 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -59,6 +59,7 @@ fn main() -> anyhow::Result<()> { let setup = validator::testonly::Setup::new(rng, validator_count); let validator_keys = setup.validator_keys.clone(); + let attester_keys = setup.attester_keys.clone(); // Each node will have `gossip_peers` outbound peers. let nodes = addrs.len(); @@ -77,6 +78,7 @@ fn main() -> anyhow::Result<()> { max_payload_size: 1000000, node_key: node_keys[i].clone(), validator_key: validator_keys.get(i).cloned(), + attester_key: attester_keys.get(i).cloned(), gossip_dynamic_inbound_limit: peers, gossip_static_inbound: HashSet::default(), gossip_static_outbound: HashMap::default(), diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index a6999f9f..c763b4f5 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -11,7 +11,7 @@ use zksync_concurrency::{ctx, net}; use zksync_consensus_bft as bft; use zksync_consensus_crypto::{read_optional_text, read_required_text, Text, TextFmt}; use zksync_consensus_executor as executor; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::{attester, node, validator}; use zksync_consensus_storage::{BlockStore, BlockStoreRunner}; use zksync_protobuf::{read_required, required, ProtoFmt}; @@ -93,6 +93,7 @@ pub struct AppConfig { pub genesis: validator::Genesis, pub max_payload_size: usize, pub validator_key: Option, + pub attester_key: Option, pub node_key: node::SecretKey, pub 
gossip_dynamic_inbound_limit: usize, @@ -133,6 +134,8 @@ impl ProtoFmt for AppConfig { // TODO: read secret. validator_key: read_optional_secret_text(&r.validator_secret_key) .context("validator_secret_key")?, + attester_key: read_optional_secret_text(&r.attester_secret_key) + .context("attester_secret_key")?, node_key: read_required_secret_text(&r.node_secret_key).context("node_secret_key")?, gossip_dynamic_inbound_limit: required(&r.gossip_dynamic_inbound_limit) @@ -153,6 +156,7 @@ impl ProtoFmt for AppConfig { genesis: Some(self.genesis.build()), max_payload_size: Some(self.max_payload_size.try_into().unwrap()), validator_secret_key: self.validator_key.as_ref().map(TextFmt::encode), + attester_secret_key: self.attester_key.as_ref().map(TextFmt::encode), node_secret_key: Some(self.node_key.encode()), gossip_dynamic_inbound_limit: Some( @@ -210,6 +214,7 @@ impl Configs { self.app.max_payload_size, )), }), + attester: None, }; Ok((e, runner)) } diff --git a/node/tools/src/proto/mod.proto b/node/tools/src/proto/mod.proto index 449d144f..85c7fc74 100644 --- a/node/tools/src/proto/mod.proto +++ b/node/tools/src/proto/mod.proto @@ -79,6 +79,9 @@ message AppConfig { // Validator secret key. optional string validator_secret_key = 10; // optional; ValidatorSecretKey + // Validator secret key. + optional string attester_secret_key = 12; // optional; AttesterSecretKey + // Gossip network // Node secret key. 
diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index 25f631fd..4ba04cef 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -18,6 +18,7 @@ impl Distribution for EncodeDist { genesis: rng.gen(), max_payload_size: rng.gen(), validator_key: self.sample_opt(|| rng.gen()), + attester_key: self.sample_opt(|| rng.gen()), node_key: rng.gen(), gossip_dynamic_inbound_limit: rng.gen(), From 1e4e72022a7a99fbae2838576aad70e747ce3831 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Apr 2024 18:02:51 -0300 Subject: [PATCH 29/79] Identify node in gossip network adding the attester key --- node/actors/network/src/gossip/mod.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 0e03ae9e..b7c2fba2 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -13,10 +13,15 @@ //! eclipse attack. Dynamic connections are supposed to improve the properties of the gossip //! network graph (minimize its diameter, increase connectedness). use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; +use im::HashMap; +use snow::types::Hash; use std::sync::{atomic::AtomicUsize, Arc}; pub(crate) use validator_addrs::*; use zksync_concurrency::{ctx, ctx::channel, scope, sync}; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::{ + attester::{self, BatchNumber, L1Batch, L1BatchQC, SignedBatchMsg}, + node, validator, +}; use zksync_consensus_storage::BlockStore; use self::batch_signatures::L1BatchSignaturesWatch; @@ -49,6 +54,10 @@ pub(crate) struct Network { pub(crate) sender: channel::UnboundedSender, /// Queue of block fetching requests. pub(crate) fetch_queue: fetch::Queue, + /// Attester SecretKey, None if the node is not an attester. + pub(crate) attester_key: Option, + /// L1 batch qc. 
+ pub(crate) l1_batch_qc: HashMap, /// TESTONLY: how many time push_validator_addrs rpc was called by the peers. pub(crate) push_validator_addrs_calls: AtomicUsize, } @@ -69,6 +78,8 @@ impl Network { outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), validator_addrs: ValidatorAddrsWatch::default(), batch_signatures: L1BatchSignaturesWatch::default(), + attester_key: cfg.attester_key.clone(), + l1_batch_qc: HashMap::new(), cfg, fetch_queue: fetch::Queue::default(), block_store, @@ -109,4 +120,12 @@ impl Network { }) .await; } + + pub(crate) async fn update_actual_qc(&self, msg: SignedBatchMsg) { + self.l1_batch_qc + .clone() + .entry(msg.msg.number.clone()) + .or_insert_with(|| L1BatchQC::new(msg.msg.clone(), self.genesis())) + .add(&msg, self.genesis()); + } } From 5f5cee1989403f651b6a1b22b2ded2b78eb0f67c Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 3 May 2024 15:05:00 -0300 Subject: [PATCH 30/79] Update watcher and signature broadcasting tasks --- .../network/src/gossip/batch_signatures.rs | 45 ++++------------ node/actors/network/src/gossip/mod.rs | 51 ++++++++++++++++--- node/actors/network/src/lib.rs | 15 +++++- .../roles/src/attester/messages/l1_batch.rs | 7 +++ 4 files changed, 74 insertions(+), 44 deletions(-) diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_signatures.rs index 493d66c7..63f1fbe9 100644 --- a/node/actors/network/src/gossip/batch_signatures.rs +++ b/node/actors/network/src/gossip/batch_signatures.rs @@ -1,25 +1,17 @@ -//! Global state distributed by active validators, observed by all the nodes in the network. +//! Global state distributed by active attesters, observed by all the nodes in the network. use crate::watch::Watch; use std::{collections::HashSet, sync::Arc}; use zksync_concurrency::sync; use zksync_consensus_roles::attester::{self, L1Batch}; -/// Mapping from validator::PublicKey to a signed validator::NetAddress. 
-/// Represents the currents state of node's knowledge about the validator endpoints. +/// Mapping from attester::PublicKey to a signed attester::L1Batch message. +/// Represents the currents state of node's knowledge about the attester signatures. #[derive(Clone, Default, PartialEq, Eq)] pub(crate) struct L1BatchSignatures( pub(super) im::HashMap>>, ); impl L1BatchSignatures { - // /// Gets a NetAddress for a given key. - // pub(crate) fn get( - // &self, - // key: &attester::PublicKey, - // ) -> Option<&Arc>> { - // self.0.get(key) - // } - /// Returns a set of entries of `self` which are newer than the entries in `b`. pub(super) fn get_newer(&self, b: &Self) -> Vec>> { let mut newer = vec![]; @@ -49,7 +41,7 @@ impl L1BatchSignatures { let mut done = HashSet::new(); for d in data { // Disallow multiple entries for the same key: - // it is important because a malicious validator may spam us with + // it is important because a malicious attester may spam us with // new versions and verifying signatures is expensive. if done.contains(&d.key) { anyhow::bail!("duplicate entry for {:?}", d.key); @@ -57,8 +49,8 @@ impl L1BatchSignatures { done.insert(d.key.clone()); if !attesters.contains(&d.key) { // We just skip the entries we are not interested in. - // For now the set of validators is static, so we could treat this as an error, - // however we eventually want the validator set to be dynamic. + // For now the set of attesters is static, so we could treat this as an error, + // however we eventually want the attester set to be dynamic. continue; } if let Some(x) = self.0.get(&d.key) { @@ -74,8 +66,8 @@ impl L1BatchSignatures { } } -/// Watch wrapper of ValidatorAddrs, -/// which supports subscribing to ValidatorAddr updates. +/// Watch wrapper of L1BatchSignatures, +/// which supports subscribing to L1BatchSignatures updates. 
pub(crate) struct L1BatchSignaturesWatch(Watch); impl Default for L1BatchSignaturesWatch { @@ -85,29 +77,12 @@ impl Default for L1BatchSignaturesWatch { } impl L1BatchSignaturesWatch { - /// Subscribes to ValidatorAddrs updates. + /// Subscribes to L1BatchSignatures updates. pub(crate) fn subscribe(&self) -> sync::watch::Receiver { self.0.subscribe() } - // /// Inserts a new version of the announcement signed with the given key. - // pub(crate) async fn announce( - // &self, - // key: &attester::SecretKey, - // batch_number: BatchNumber, - // timestamp: time::Utc, - // ) { - // let this = self.0.lock().await; - // let mut signatures = this.borrow().clone(); - // let d = Arc::new(key.sign_batch_msg(L1Batch { - // number: batch_number, - // timestamp, - // })); - // signatures.0.insert(d.key.clone(), d); - // this.send_replace(signatures); - // } - - /// Inserts data to ValidatorAddrs. + /// Inserts data to L1BatchSignatures. /// Subscribers are notified iff at least 1 new entry has /// been inserted. Returns an error iff an invalid /// entry in `data` has been found. The provider of the diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index b7c2fba2..74fe79b5 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -14,12 +14,11 @@ //! network graph (minimize its diameter, increase connectedness). use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; use im::HashMap; -use snow::types::Hash; use std::sync::{atomic::AtomicUsize, Arc}; pub(crate) use validator_addrs::*; use zksync_concurrency::{ctx, ctx::channel, scope, sync}; use zksync_consensus_roles::{ - attester::{self, BatchNumber, L1Batch, L1BatchQC, SignedBatchMsg}, + attester::{self, BatchNumber, L1BatchQC}, node, validator, }; use zksync_consensus_storage::BlockStore; @@ -54,6 +53,10 @@ pub(crate) struct Network { pub(crate) sender: channel::UnboundedSender, /// Queue of block fetching requests. 
pub(crate) fetch_queue: fetch::Queue, + /// L1 batches. + // pub(crate) l1_batches: sync::watch::Receiver>, + /// Last viewed QC. + pub(crate) last_viewed_qc: Option, /// Attester SecretKey, None if the node is not an attester. pub(crate) attester_key: Option, /// L1 batch qc. @@ -80,6 +83,8 @@ impl Network { batch_signatures: L1BatchSignaturesWatch::default(), attester_key: cfg.attester_key.clone(), l1_batch_qc: HashMap::new(), + // l1_batches: sync::watch::channel(None).1, + last_viewed_qc: None, cfg, fetch_queue: fetch::Queue::default(), block_store, @@ -121,11 +126,41 @@ impl Network { .await; } - pub(crate) async fn update_actual_qc(&self, msg: SignedBatchMsg) { - self.l1_batch_qc - .clone() - .entry(msg.msg.number.clone()) - .or_insert_with(|| L1BatchQC::new(msg.msg.clone(), self.genesis())) - .add(&msg, self.genesis()); + /// Task that keeps hearing about new signatures and updates the L1 batch qc. + /// It also propagates the QC if there's enough signatures. + pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) { + // FIXME This is not a good way to do this, we shouldn't be verifying the QC every time + loop { + let mut sub = self.batch_signatures.subscribe(); + let signatures = sync::changed(ctx, &mut sub).await.unwrap().clone(); + for (_, sig) in signatures.0 { + self.l1_batch_qc + .clone() + .entry(sig.msg.number.clone()) + .or_insert_with(|| L1BatchQC::new(sig.msg.clone(), self.genesis())) + .add(&sig, self.genesis()); + } + // Now we check if we have enough weight to continue. + if let Some(last_qc) = self.last_viewed_qc.clone() { + let weight = self.genesis().attesters.weight( + &self + .l1_batch_qc + .get(&last_qc.message.number) + .unwrap() + .signers, + ); + if weight > self.genesis().attesters.threshold() { + // TODO: Verify and Propagate QC. 
+ }; + } else if let Some(qc) = self.l1_batch_qc.get(&BatchNumber(0)) { + let weight = self + .genesis() + .attesters + .weight(&self.l1_batch_qc.get(&qc.message.number).unwrap().signers); + if weight > self.genesis().attesters.threshold() { + // TODO: Verify and Propagate QC. + }; + } + } } } diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index cac308bf..5c5a521d 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -2,7 +2,10 @@ use anyhow::Context as _; use std::sync::Arc; use tracing::Instrument as _; -use zksync_concurrency::{ctx, ctx::channel, limiter, scope}; +use zksync_concurrency::{ + ctx::{self, channel}, + limiter, scope, +}; use zksync_consensus_storage::BlockStore; use zksync_consensus_utils::pipe::ActorPipe; @@ -119,6 +122,12 @@ impl Runner { Ok(()) }); + // Update QC batches in the background. + s.spawn(async { + self.net.gossip.update_batch_qc(ctx).await; + Ok(()) + }); + // Maintain static gossip connections. for (peer, addr) in &self.net.gossip.cfg.gossip.static_outbound { s.spawn::<()>(async { @@ -152,6 +161,10 @@ impl Runner { } } + if let Some(_c) = &self.net.gossip.attester_key { + // TODO: check for batches to sign + } + let accept_limiter = limiter::Limiter::new(ctx, self.net.gossip.cfg.tcp_accept_rate); loop { accept_limiter.acquire(ctx, 1).await?; diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs index 2118612c..6c4e6ded 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -11,6 +11,13 @@ use super::{SignedBatchMsg, Signers}; /// A batch number. pub struct BatchNumber(pub u64); +impl BatchNumber { + /// Increment the batch number. + pub fn next_batch_number(&mut self) -> BatchNumber { + BatchNumber(self.0.checked_add(1).unwrap_or(0)) + } +} + /// A message to send by attesters to the gossip network. 
/// It contains the attester signature to sign the block batches to be sent to L1. #[derive(Clone, Debug, PartialEq, Eq, Hash)] From e9ad0fa50f7e2480965767999437bc471c4c5968 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 3 May 2024 15:05:22 -0300 Subject: [PATCH 31/79] Add tests and update test setup to get batches --- node/actors/network/src/gossip/testonly.rs | 2 +- node/actors/network/src/gossip/tests/mod.rs | 179 +++++++++++++++++++- node/libs/roles/src/validator/testonly.rs | 13 +- 3 files changed, 190 insertions(+), 4 deletions(-) diff --git a/node/actors/network/src/gossip/testonly.rs b/node/actors/network/src/gossip/testonly.rs index fcf8bee4..d517165c 100644 --- a/node/actors/network/src/gossip/testonly.rs +++ b/node/actors/network/src/gossip/testonly.rs @@ -1,7 +1,7 @@ #![allow(dead_code)] use super::*; use crate::{frame, mux, noise, preface, rpc, Config, GossipConfig}; -use anyhow::Context as _; +use anyhow::Context; use rand::Rng as _; use std::collections::BTreeMap; use zksync_concurrency::{ctx, limiter}; diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 60fabcf7..48feec61 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -1,6 +1,6 @@ use super::*; use crate::{metrics, preface, rpc, testonly}; -use anyhow::Context as _; +use anyhow::Context; use assert_matches::assert_matches; use pretty_assertions::assert_eq; use rand::Rng; @@ -14,7 +14,10 @@ use zksync_concurrency::{ testonly::{abort_on_panic, set_timeout}, time, }; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::{ + attester::{L1Batch, SignedBatchMsg}, + validator, +}; use zksync_consensus_storage::testonly::new_store; mod fetch; @@ -86,6 +89,9 @@ fn mk_version(rng: &mut R) -> u64 { #[derive(Default)] struct View(im::HashMap>>); +#[derive(Default)] +struct Signatures(im::HashMap>>); + fn mk_netaddr( key: &validator::SecretKey, addr: std::net::SocketAddr, @@ 
-99,6 +105,14 @@ fn mk_netaddr( }) } +fn mk_batch( + key: &attester::SecretKey, + number: BatchNumber, + timestamp: time::Utc, +) -> SignedBatchMsg { + key.sign_batch_msg(L1Batch { number, timestamp }) +} + fn random_netaddr( rng: &mut R, key: &validator::SecretKey, @@ -111,6 +125,17 @@ fn random_netaddr( )) } +fn random_signature( + rng: &mut R, + key: &attester::SecretKey, +) -> Arc> { + let batch = L1Batch { + timestamp: mk_timestamp(rng), + number: BatchNumber(rng.gen_range(0..1000)), + }; + Arc::new(key.sign_batch_msg(batch.to_owned())) +} + fn update_netaddr( rng: &mut R, addr: &validator::NetAddress, @@ -126,6 +151,20 @@ fn update_netaddr( )) } +fn update_signature( + _rng: &mut R, + batch: &L1Batch, + key: &attester::SecretKey, + batch_number_diff: i64, + timestamp_diff: time::Duration, +) -> Arc> { + let batch = L1Batch { + timestamp: batch.timestamp + timestamp_diff, + number: BatchNumber((batch.number.0 as i64 + batch_number_diff) as u64), + }; + Arc::new(key.sign_batch_msg(batch.to_owned())) +} + impl View { fn insert(&mut self, entry: Arc>) { self.0.insert(entry.key.clone(), entry); @@ -140,6 +179,20 @@ impl View { } } +impl Signatures { + fn insert(&mut self, entry: Arc>) { + self.0.insert(entry.key.clone(), entry); + } + + fn get(&mut self, key: &attester::SecretKey) -> Arc> { + self.0.get(&key.public()).unwrap().clone() + } + + fn as_vec(&self) -> Vec>> { + self.0.values().cloned().collect() + } +} + #[tokio::test] async fn test_validator_addrs() { abort_on_panic(); @@ -465,3 +518,125 @@ async fn rate_limiting() { assert!((1..=2).contains(&got), "got {got} want 1 or 2"); } } + +#[tokio::test] +async fn test_batch_signatures() { + abort_on_panic(); + let rng = &mut ctx::test_root(&ctx::RealClock).rng(); + + let keys: Vec = (0..8).map(|_| rng.gen()).collect(); + let attesters = attester::Committee::new(keys.iter().map(|k| attester::WeightedAttester { + key: k.public(), + weight: 1250, + })) + .unwrap(); + let signatures = 
L1BatchSignaturesWatch::default(); + let mut sub = signatures.subscribe(); + + // Initial values. + let mut want = Signatures::default(); + for k in &keys[0..6] { + want.insert(random_signature(rng, k)); + } + signatures.update(&attesters, &want.as_vec()).await.unwrap(); + assert_eq!(want.0, sub.borrow_and_update().0); + + // Update values. + let delta = time::Duration::seconds(10); + // newer batch number + let k0v2 = update_signature(rng, &want.get(&keys[0]).msg, &keys[0], 1, -delta); + // same batch number, newer timestamp + let k1v2 = update_signature(rng, &want.get(&keys[1]).msg, &keys[1], 0, delta); + // same batch number, same timestamp + let k2v2 = update_signature( + rng, + &want.get(&keys[2]).msg, + &keys[2], + 0, + time::Duration::ZERO, + ); + // same batch number, older timestamp + let k3v2 = update_signature(rng, &want.get(&keys[3]).msg, &keys[3], 0, -delta); + // older batch number + let k4v2 = update_signature(rng, &want.get(&keys[4]).msg, &keys[4], -1, delta); + // first entry for a key in the config + let k6v1 = random_signature(rng, &keys[6]); + // entry for a key outside of the config + let k8 = rng.gen(); + let k8v1 = random_signature(rng, &k8); + + want.insert(k0v2.clone()); + want.insert(k1v2.clone()); + want.insert(k6v1.clone()); + let update = [ + k0v2, + k1v2, + k2v2, + k3v2, + k4v2, + // no new entry for keys[5] + k6v1, + // no entry at all for keys[7] + k8v1.clone(), + ]; + signatures.update(&attesters, &update).await.unwrap(); + assert_eq!(want.0, sub.borrow_and_update().0); + + // Invalid signature. + let mut k0v3 = mk_batch( + &keys[1], + BatchNumber(rng.gen_range(0..1000)), + time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), + ); + k0v3.key = keys[0].public(); + assert!(signatures + .update(&attesters, &[Arc::new(k0v3)]) + .await + .is_err()); + assert_eq!(want.0, sub.borrow_and_update().0); + + // Duplicate entry in the update. 
+ assert!(signatures + .update(&attesters, &[k8v1.clone(), k8v1]) + .await + .is_err()); + assert_eq!(want.0, sub.borrow_and_update().0); +} + +// #[tokio::test(flavor = "multi_thread")] +// async fn test_batch_signatures_propagation() { +// abort_on_panic(); +// let ctx = &ctx::test_root(&ctx::AffineClock::new(40.)); +// let rng = &mut ctx.rng(); +// let mut setup = validator::testonly::Setup::new(rng, 10); +// let cfgs = testonly::new_configs(rng, &setup, 1); +// setup.push_batch(L1Batch { +// number: BatchNumber(0), +// timestamp: time::UNIX_EPOCH, +// }); + +// scope::run!(ctx, |ctx, s| async { +// let (store, runner) = new_store(ctx, &setup.genesis).await; +// s.spawn_bg(runner.run(ctx)); +// let nodes: Vec<_> = cfgs +// .iter() +// .enumerate() +// .map(|(i, cfg)| { +// let (node, runner) = testonly::Instance::new(cfg.clone(), store.clone()); +// s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); +// node +// }) +// .collect(); +// for (i, node) in nodes.iter().enumerate() { +// let sub = &mut node.net.gossip.batch_signatures.subscribe(); +// sync::wait_for(ctx, sub, |got| { +// println!("{:?}", got.0.values().cloned().collect::>()); +// setup.signed_l1_batches == got.0.values().cloned().collect::>() +// }) +// .await?; +// } +// Ok(()) +// }) +// .await +// .unwrap(); +// } diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 6513968b..859dc90e 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,5 +1,5 @@ //! Test-only utilities. 
-use crate::attester::{self, WeightedAttester}; +use crate::attester::{self, L1Batch, SignedBatchMsg, WeightedAttester}; use super::{ AggregateSignature, BlockHeader, BlockNumber, CommitQC, Committee, ConsensusMsg, FinalBlock, @@ -51,6 +51,7 @@ impl Setup { attester_keys, genesis, blocks: vec![], + signed_l1_batches: vec![], }) } @@ -124,6 +125,14 @@ impl Setup { let first = self.0.blocks.first()?.number(); self.0.blocks.get(n.0.checked_sub(first.0)? as usize) } + + /// Pushes a new L1 batch. + pub fn push_batch(&mut self, batch: L1Batch) { + for key in &self.0.attester_keys { + let signed = key.sign_batch_msg(batch.clone()); + self.0.signed_l1_batches.push(signed); + } + } } /// Setup. @@ -135,6 +144,8 @@ pub struct SetupInner { pub attester_keys: Vec, /// Past blocks. pub blocks: Vec, + /// L1 batches + pub signed_l1_batches: Vec>, /// Genesis config. pub genesis: Genesis, } From 8365c9f2adc68328ed0a888c91760b0d27338b51 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 3 May 2024 17:58:46 -0300 Subject: [PATCH 32/79] Fix genesis hash test --- node/libs/roles/src/attester/tests.rs | 5 +---- node/libs/roles/src/validator/conv.rs | 12 ++++++++++-- node/libs/roles/src/validator/messages/consensus.rs | 1 - node/libs/roles/src/validator/messages/tests.rs | 2 +- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index 887afaac..bdb31259 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -1,7 +1,4 @@ -use crate::{ - attester, - validator::{testonly::Setup, Committee, Genesis}, -}; +use crate::{attester, validator::testonly::Setup}; use super::*; use assert_matches::assert_matches; diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 4966f69b..aa91013f 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,5 +1,5 @@ use crate::{ - 
attester::{self}, + attester::{self, WeightedAttester}, node::SessionId, }; @@ -26,6 +26,13 @@ impl ProtoFmt for GenesisRaw { .map(|(i, v)| WeightedValidator::read(v).context(i)) .collect::>() .context("validators_v1")?; + let attesters: Vec<_> = r + .attesters + .iter() + .enumerate() + .map(|(i, v)| WeightedAttester::read(v).context(i)) + .collect::>() + .context("attesters")?; Ok(GenesisRaw { chain_id: ChainId(*required(&r.chain_id).context("chain_id")?), fork_number: ForkNumber(*required(&r.fork_number).context("fork_number")?), @@ -34,7 +41,8 @@ impl ProtoFmt for GenesisRaw { protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), validators_committee: Committee::new(validators.into_iter()) .context("validators_v1")?, - attesters_committee: attester::Committee::new(vec![]).context("attesters")?, + attesters_committee: attester::Committee::new(attesters.into_iter()) + .context("attesters")?, leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) } diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index 1eb820b3..fbf21b7e 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -232,7 +232,6 @@ pub struct ChainId(pub u64); pub struct GenesisRaw { /// ID of the blockchain. pub chain_id: ChainId, - /// Number of the fork. Should be incremented every time the genesis is updated, /// i.e. whenever a hard fork is performed. 
pub fork_number: ForkNumber, diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index 01dbdd30..fc98bc18 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -162,7 +162,7 @@ mod version1 { #[test] fn genesis_hash_change_detector() { let want: GenesisHash = Text::new( - "genesis_hash:keccak256:13a16cfa758c6716b4c4d40a5fe71023a016c7507b7893c7dc775f4420fc5d61", + "genesis_hash:keccak256:40c04eae06ec38220ae8b2ec070090af9117e6935e66f90e076617a687b7a665", ) .decode() .unwrap(); From 05f80e24474147035282094a9f2fc6888c4834ba Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 6 May 2024 13:11:06 -0300 Subject: [PATCH 33/79] Fix encoding tests and hash back to previous version --- node/libs/roles/src/validator/messages/tests.rs | 3 ++- node/libs/roles/src/validator/tests.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index fc98bc18..301f38e5 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -159,10 +159,11 @@ mod version1 { /// Note that genesis is NOT versioned by ProtocolVersion. /// Even if it was, ALL versions of genesis need to be supported FOREVER, /// unless we introduce dynamic regenesis. + /// FIXME: This fails with the new attester committee. 
#[test] fn genesis_hash_change_detector() { let want: GenesisHash = Text::new( - "genesis_hash:keccak256:40c04eae06ec38220ae8b2ec070090af9117e6935e66f90e076617a687b7a665", + "genesis_hash:keccak256:13a16cfa758c6716b4c4d40a5fe71023a016c7507b7893c7dc775f4420fc5d61", ) .decode() .unwrap(); diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index e4183c14..f65c10b9 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -96,8 +96,8 @@ fn test_schema_encoding() { test_encode_random::(rng); test_encode_random::(rng); test_encode_random::(rng); - test_encode_random::(rng); test_encode_random::(rng); + test_encode_random::(rng); test_encode_random::(rng); test_encode_random::(rng); } From df7b777a5a0a8634104abec24e34c5fa0bb2a00c Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 6 May 2024 14:39:41 -0300 Subject: [PATCH 34/79] Fix deny --- node/Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 41bba3ec..caa3d014 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -1951,9 +1951,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -1962,9 +1962,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" dependencies = [ "pest", "pest_generator", @@ -1972,9 +1972,9 @@ dependencies = [ [[package]] 
name = "pest_generator" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" dependencies = [ "pest", "pest_meta", @@ -1985,9 +1985,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" +checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", From e77db655badf85aafbb8263be78f3b668021c495 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 6 May 2024 15:40:32 -0300 Subject: [PATCH 35/79] Add test disclaimer --- node/actors/network/src/gossip/tests/mod.rs | 36 ++------------------- 1 file changed, 2 insertions(+), 34 deletions(-) diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 48feec61..4f852655 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -603,40 +603,8 @@ async fn test_batch_signatures() { assert_eq!(want.0, sub.borrow_and_update().0); } +// TODO: This test is disabled because the logic for attesters to receive and sign batches is not implemented yet. +// It should be re-enabled once the logic is implemented. 
// #[tokio::test(flavor = "multi_thread")] // async fn test_batch_signatures_propagation() { -// abort_on_panic(); -// let ctx = &ctx::test_root(&ctx::AffineClock::new(40.)); -// let rng = &mut ctx.rng(); -// let mut setup = validator::testonly::Setup::new(rng, 10); -// let cfgs = testonly::new_configs(rng, &setup, 1); -// setup.push_batch(L1Batch { -// number: BatchNumber(0), -// timestamp: time::UNIX_EPOCH, -// }); - -// scope::run!(ctx, |ctx, s| async { -// let (store, runner) = new_store(ctx, &setup.genesis).await; -// s.spawn_bg(runner.run(ctx)); -// let nodes: Vec<_> = cfgs -// .iter() -// .enumerate() -// .map(|(i, cfg)| { -// let (node, runner) = testonly::Instance::new(cfg.clone(), store.clone()); -// s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); -// node -// }) -// .collect(); -// for (i, node) in nodes.iter().enumerate() { -// let sub = &mut node.net.gossip.batch_signatures.subscribe(); -// sync::wait_for(ctx, sub, |got| { -// println!("{:?}", got.0.values().cloned().collect::>()); -// setup.signed_l1_batches == got.0.values().cloned().collect::>() -// }) -// .await?; -// } -// Ok(()) -// }) -// .await -// .unwrap(); // } From e86aa92f3326e8e20b16f4dbfb0581a447dbd7ae Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 6 May 2024 18:53:11 -0300 Subject: [PATCH 36/79] Update qc construction implementation --- node/actors/network/src/gossip/mod.rs | 43 ++++++++++++------- node/actors/network/src/lib.rs | 2 + .../roles/src/attester/messages/l1_batch.rs | 2 +- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index d0dc9ad5..014fde2d 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -13,8 +13,12 @@ //! eclipse attack. Dynamic connections are supposed to improve the properties of the gossip //! network graph (minimize its diameter, increase connectedness). 
use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; +use anyhow::Context; use im::HashMap; -use std::sync::{atomic::AtomicUsize, Arc}; +use std::{ + borrow::Borrow, + sync::{atomic::AtomicUsize, Arc}, +}; pub(crate) use validator_addrs::*; use zksync_concurrency::{ctx, ctx::channel, scope, sync}; use zksync_consensus_roles::{ @@ -127,12 +131,16 @@ impl Network { } /// Task that keeps hearing about new signatures and updates the L1 batch qc. - /// It also propagates the QC if there's enough signatures. - pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) { + /// It will propagate the QC if there are enough signatures. + pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { // FIXME This is not a good way to do this, we shouldn't be verifying the QC every time + // Can we get only the latest signatures? loop { let mut sub = self.batch_signatures.subscribe(); - let signatures = sync::changed(ctx, &mut sub).await.unwrap().clone(); + let signatures = sync::changed(ctx, &mut sub) + .await + .context("batch signatures")? + .clone(); for (_, sig) in signatures.0 { self.l1_batch_qc .clone() @@ -145,22 +153,27 @@ impl Network { let weight = self.genesis().attesters_committee.weight( &self .l1_batch_qc - .get(&last_qc.message.number) - .unwrap() + .get(&last_qc.message.number.next_batch_number()) + .context("last qc")? .signers, ); - if weight > self.genesis().attesters_committee.threshold() { - // TODO: Verify and Propagate QC. + if weight < self.genesis().attesters_committee.threshold() { + return Ok(()); }; - } else if let Some(qc) = self.l1_batch_qc.get(&BatchNumber(0)) { - let weight = self - .genesis() - .attesters_committee - .weight(&self.l1_batch_qc.get(&qc.message.number).unwrap().signers); - if weight > self.genesis().attesters_committee.threshold() { - // TODO: Verify and Propagate QC.
+ } else { + let weight = self.genesis().attesters_committee.weight( + &self + .l1_batch_qc + .get(&BatchNumber(0)) + .context("L1 batch QC")? + .signers, + ); + if weight < self.genesis().attesters_committee.threshold() { + return Ok(()); }; } + + // If we have enough weight, we can propagate the QC. } } } diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index e99b0d7d..b13c8e8f 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -163,6 +163,8 @@ impl Runner { if let Some(_c) = &self.net.gossip.attester_key { // TODO: check for batches to sign + // Sign batches + // Broadcast signature } let accept_limiter = limiter::Limiter::new(ctx, self.net.gossip.cfg.tcp_accept_rate); diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs index a6977f4f..0aed6e29 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -13,7 +13,7 @@ pub struct BatchNumber(pub u64); impl BatchNumber { /// Increment the batch number. 
- pub fn next_batch_number(&mut self) -> BatchNumber { + pub fn next_batch_number(&self) -> BatchNumber { BatchNumber(self.0.checked_add(1).unwrap_or(0)) } } From 9511247548b770212a6999b6575b805b24fa10b6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 10:28:07 -0300 Subject: [PATCH 37/79] Delete committee prefix for validators and attesters in genesis --- node/actors/bft/src/leader/replica_commit.rs | 10 +++------- node/actors/bft/src/leader/replica_prepare.rs | 6 +++--- node/actors/bft/src/leader/tests.rs | 2 +- node/actors/bft/src/testonly/ut_harness.rs | 10 +++++----- node/actors/executor/src/lib.rs | 2 +- node/actors/network/src/consensus/mod.rs | 2 +- node/actors/network/src/consensus/tests.rs | 2 +- node/actors/network/src/gossip/mod.rs | 8 ++++---- node/actors/network/src/gossip/runner.rs | 4 ++-- node/actors/network/src/lib.rs | 2 +- node/actors/network/src/testonly.rs | 4 ++-- .../roles/src/attester/messages/l1_batch.rs | 12 ++++++------ node/libs/roles/src/attester/tests.rs | 14 +++++--------- node/libs/roles/src/validator/conv.rs | 8 ++++---- .../roles/src/validator/messages/consensus.rs | 8 ++++---- .../src/validator/messages/leader_commit.rs | 12 ++++++------ .../src/validator/messages/leader_prepare.rs | 16 ++++++++-------- .../libs/roles/src/validator/messages/tests.rs | 4 ++-- node/libs/roles/src/validator/testonly.rs | 8 ++++---- node/libs/roles/src/validator/tests.rs | 18 +++++++++--------- node/tools/src/tests.rs | 7 +++---- 21 files changed, 75 insertions(+), 84 deletions(-) diff --git a/node/actors/bft/src/leader/replica_commit.rs b/node/actors/bft/src/leader/replica_commit.rs index bb65d570..e44d7a5d 100644 --- a/node/actors/bft/src/leader/replica_commit.rs +++ b/node/actors/bft/src/leader/replica_commit.rs @@ -51,7 +51,7 @@ impl StateMachine { let author = &signed_message.key; // Check that the message signer is in the validator committee. 
- if !self.config.genesis().validators_committee.contains(author) { + if !self.config.genesis().validators.contains(author) { return Err(Error::NonValidatorSigner { signer: author.clone(), }); @@ -103,11 +103,7 @@ impl StateMachine { .expect("Could not add message to CommitQC"); // Calculate the CommitQC signers weight. - let weight = self - .config - .genesis() - .validators_committee - .weight(&commit_qc.signers); + let weight = self.config.genesis().validators.weight(&commit_qc.signers); // Update commit message current view number for author self.replica_commit_views @@ -122,7 +118,7 @@ impl StateMachine { .retain(|view_number, _| active_views.contains(view_number)); // Now we check if we have enough weight to continue. - if weight < self.config.genesis().validators_committee.threshold() { + if weight < self.config.genesis().validators.threshold() { return Ok(()); }; diff --git a/node/actors/bft/src/leader/replica_prepare.rs b/node/actors/bft/src/leader/replica_prepare.rs index 51e4fb66..a9ab65a5 100644 --- a/node/actors/bft/src/leader/replica_prepare.rs +++ b/node/actors/bft/src/leader/replica_prepare.rs @@ -63,7 +63,7 @@ impl StateMachine { let author = &signed_message.key; // Check that the message signer is in the validator set. - if !self.config.genesis().validators_committee.contains(author) { + if !self.config.genesis().validators.contains(author) { return Err(Error::NonValidatorSigner { signer: author.clone(), }); @@ -114,7 +114,7 @@ impl StateMachine { .expect("Could not add message to PrepareQC"); // Calculate the PrepareQC signers weight. - let weight = prepare_qc.weight(&self.config.genesis().validators_committee); + let weight = prepare_qc.weight(&self.config.genesis().validators); // Update prepare message current view number for author self.replica_prepare_views @@ -129,7 +129,7 @@ impl StateMachine { .retain(|view_number, _| active_views.contains(view_number)); // Now we check if we have enough weight to continue. 
- if weight < self.config.genesis().validators_committee.threshold() { + if weight < self.config.genesis().validators.threshold() { return Ok(()); } diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 7a6af02f..9d22885b 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -374,7 +374,7 @@ async fn replica_prepare_different_messages() { let mut replica_commit_result = None; // The rest of the validators until threshold sign other_replica_prepare - for i in validators / 2..util.genesis().validators_committee.threshold() as usize { + for i in validators / 2..util.genesis().validators.threshold() as usize { replica_commit_result = util .process_replica_prepare(ctx, util.keys[i].sign_msg(other_replica_prepare.clone())) .await diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 2f997fbc..8a5eb372 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -86,7 +86,7 @@ impl UTHarness { pub(crate) async fn new_many(ctx: &ctx::Ctx) -> (UTHarness, BlockStoreRunner) { let num_validators = 6; let (util, runner) = UTHarness::new(ctx, num_validators).await; - assert!(util.genesis().validators_committee.max_faulty_weight() > 0); + assert!(util.genesis().validators.max_faulty_weight() > 0); (util, runner) } @@ -226,12 +226,12 @@ impl UTHarness { (i + 1) as u64 * self .genesis() - .validators_committee + .validators .iter() .next() .unwrap() .weight - < self.genesis().validators_committee.threshold(), + < self.genesis().validators.threshold(), first_match, ) { (true, _) => assert!(res.unwrap().is_none()), @@ -268,12 +268,12 @@ impl UTHarness { (i + 1) as u64 * self .genesis() - .validators_committee + .validators .iter() .next() .unwrap() .weight - < self.genesis().validators_committee.threshold(), + < self.genesis().validators.threshold(), first_match, ) { (true, _) => res.unwrap(), diff --git 
a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index bedb1240..9c8b3951 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -125,7 +125,7 @@ impl Executor { if !self .block_store .genesis() - .validators_committee + .validators .keys() .any(|key| key == &validator.key.public()) { diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 8a3d31cb..789c2f63 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -158,7 +158,7 @@ impl Network { let key = gossip.cfg.validator_key.clone()?; let validators: HashSet<_> = gossip .genesis() - .validators_committee + .validators .keys() .cloned() .collect(); diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index 943b9def..4c87be33 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -166,7 +166,7 @@ async fn test_genesis_mismatch() { .gossip .validator_addrs .update( - &setup.genesis.validators_committee, + &setup.genesis.validators, &[Arc::new(setup.validator_keys[1].sign_msg( validator::NetAddress { addr: *cfgs[1].server_addr, diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 014fde2d..e4e8b982 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -150,25 +150,25 @@ impl Network { } // Now we check if we have enough weight to continue. if let Some(last_qc) = self.last_viewed_qc.clone() { - let weight = self.genesis().attesters_committee.weight( + let weight = self.genesis().attesters.weight( &self .l1_batch_qc .get(&last_qc.message.number.next_batch_number()) .context("last qc")? 
.signers, ); - if weight < self.genesis().attesters_committee.threshold() { + if weight < self.genesis().attesters.threshold() { return Ok(()); }; } else { - let weight = self.genesis().attesters_committee.weight( + let weight = self.genesis().attesters.weight( &self .l1_batch_qc .get(&BatchNumber(0)) .context("L1 batch QC")? .signers, ); - if weight < self.genesis().attesters_committee.threshold() { + if weight < self.genesis().attesters.threshold() { return Ok(()); }; } diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 6900b1ac..ebe66e86 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -29,7 +29,7 @@ impl rpc::Handler for PushValidatorAddrsServer<' .fetch_add(1, Ordering::SeqCst); self.0 .validator_addrs - .update(&self.0.genesis().validators_committee, &req.0) + .update(&self.0.genesis().validators, &req.0) .await?; Ok(()) } @@ -47,7 +47,7 @@ impl rpc::Handler for L1BatchServer<'_> { async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::push_signature::Req) -> anyhow::Result<()> { self.0 .batch_signatures - .update(&self.0.genesis().attesters_committee, &req.0) + .update(&self.0.genesis().attesters, &req.0) .await?; Ok(()) } diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index b13c8e8f..cb06c6f5 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -148,7 +148,7 @@ impl Runner { } if let Some(c) = &self.net.consensus { - let validators = &c.gossip.genesis().validators_committee; + let validators = &c.gossip.genesis().validators; // If we are active validator ... if validators.contains(&c.key.public()) { // Maintain outbound connections. 
diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index efc42292..fd8315c3 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -228,7 +228,7 @@ impl Instance { let want: HashSet<_> = self .genesis() - .validators_committee + .validators .keys() .cloned() .collect(); @@ -309,7 +309,7 @@ pub async fn instant_network( node.net .gossip .validator_addrs - .update(&node.genesis().validators_committee, &addrs) + .update(&node.genesis().validators, &addrs) .await .unwrap(); } diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs index 0aed6e29..7c4b20b2 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -71,7 +71,7 @@ impl L1BatchQC { pub fn new(message: L1Batch, genesis: &Genesis) -> Self { Self { message, - signers: Signers::new(genesis.attesters_committee.len()), + signers: Signers::new(genesis.attesters.len()), signature: attester::AggregateSignature::default(), } } @@ -82,7 +82,7 @@ impl L1BatchQC { if self.message != msg.msg { return; }; - let Some(i) = genesis.attesters_committee.index(&msg.key) else { + let Some(i) = genesis.attesters.index(&msg.key) else { return; }; if self.signers.0[i] { @@ -95,12 +95,12 @@ impl L1BatchQC { /// Verifies the signature of the L1BatchQC. pub fn verify(&self, genesis: &Genesis) -> Result<(), L1BatchQCVerifyError> { use L1BatchQCVerifyError as Error; - if self.signers.len() != genesis.attesters_committee.len() { + if self.signers.len() != genesis.attesters.len() { return Err(Error::BadSignersSet); } // Verify the signers' weight is enough. 
- let weight = genesis.attesters_committee.weight(&self.signers); - let threshold = genesis.attesters_committee.threshold(); + let weight = genesis.attesters.weight(&self.signers); + let threshold = genesis.attesters.threshold(); if weight < threshold { return Err(Error::NotEnoughSigners { got: weight, @@ -109,7 +109,7 @@ impl L1BatchQC { } let messages_and_keys = genesis - .attesters_committee + .attesters .iter_keys() .enumerate() .filter(|(i, _)| self.signers.0[*i]) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index bdb31259..9b451076 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -150,11 +150,10 @@ fn test_l1_batch_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.attesters_committee = - attester::Committee::new(setup1.genesis.attesters_committee.iter().take(3).cloned()) - .unwrap(); + genesis3.attesters = + attester::Committee::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(); let genesis3 = genesis3.with_hash(); - let attester_weight = setup1.genesis.attesters_committee.total_weight() / 6; + let attester_weight = setup1.genesis.attesters.total_weight() / 6; for i in 0..setup1.attester_keys.len() + 1 { let mut qc = L1BatchQC::new(make_l1_batch_msg(rng), &setup1.genesis); @@ -162,7 +161,7 @@ fn test_l1_batch_qc() { qc.add(&key.sign_batch_msg(qc.message.clone()), &setup1.genesis); } let expected_weight = i as u64 * attester_weight; - if expected_weight >= setup1.genesis.attesters_committee.threshold() { + if expected_weight >= setup1.genesis.attesters.threshold() { assert!(qc.verify(&setup1.genesis).is_ok()); } else { assert_matches!( @@ -192,10 +191,7 @@ fn test_attester_committee_weights() { for (n, weight) in sums.iter().enumerate() { let key = &setup.attester_keys[n]; qc.add(&key.sign_batch_msg(msg.clone()), &setup.genesis); - assert_eq!( - 
setup.genesis.attesters_committee.weight(&qc.signers), - *weight - ); + assert_eq!(setup.genesis.attesters.weight(&qc.signers), *weight); } } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index aa91013f..91ccbc30 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -39,9 +39,9 @@ impl ProtoFmt for GenesisRaw { first_block: BlockNumber(*required(&r.first_block).context("first_block")?), protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), - validators_committee: Committee::new(validators.into_iter()) + validators: Committee::new(validators.into_iter()) .context("validators_v1")?, - attesters_committee: attester::Committee::new(attesters.into_iter()) + attesters: attester::Committee::new(attesters.into_iter()) .context("attesters")?, leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) @@ -54,11 +54,11 @@ impl ProtoFmt for GenesisRaw { protocol_version: Some(self.protocol_version.0), validators_v1: self - .validators_committee + .validators .iter() .map(|v| v.build()) .collect(), - attesters: self.attesters_committee.iter().map(|v| v.build()).collect(), + attesters: self.attesters.iter().map(|v| v.build()).collect(), leader_selection: Some(self.leader_selection.build()), } } diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index fbf21b7e..df2f3475 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -240,9 +240,9 @@ pub struct GenesisRaw { /// First block of a fork. pub first_block: BlockNumber, /// Set of validators of the chain. - pub validators_committee: Committee, + pub validators: Committee, /// Set of attesters of the chain. - pub attesters_committee: attester::Committee, + pub attesters: attester::Committee, /// The mode used for selecting leader for a given view. 
pub leader_selection: LeaderSelectionMode, } @@ -307,7 +307,7 @@ impl Genesis { /// Verifies correctness. pub fn verify(&self) -> anyhow::Result<()> { if let LeaderSelectionMode::Sticky(pk) = &self.leader_selection { - if self.validators_committee.index(pk).is_none() { + if self.validators.index(pk).is_none() { anyhow::bail!("leader_selection sticky mode public key is not in committee"); } } @@ -317,7 +317,7 @@ impl Genesis { /// Computes the leader for the given view. pub fn view_leader(&self, view: ViewNumber) -> validator::PublicKey { - self.validators_committee + self.validators .view_leader(view, &self.leader_selection) } diff --git a/node/libs/roles/src/validator/messages/leader_commit.rs b/node/libs/roles/src/validator/messages/leader_commit.rs index bd2dc204..bbcedf1b 100644 --- a/node/libs/roles/src/validator/messages/leader_commit.rs +++ b/node/libs/roles/src/validator/messages/leader_commit.rs @@ -86,7 +86,7 @@ impl CommitQC { pub fn new(message: ReplicaCommit, genesis: &Genesis) -> Self { Self { message, - signers: Signers::new(genesis.validators_committee.len()), + signers: Signers::new(genesis.validators.len()), signature: validator::AggregateSignature::default(), } } @@ -102,7 +102,7 @@ impl CommitQC { if self.message != msg.msg { return Err(Error::InconsistentMessages); }; - let Some(i) = genesis.validators_committee.index(&msg.key) else { + let Some(i) = genesis.validators.index(&msg.key) else { return Err(Error::SignerNotInCommittee { signer: Box::new(msg.key.clone()), }); @@ -121,13 +121,13 @@ impl CommitQC { self.message .verify(genesis) .map_err(Error::InvalidMessage)?; - if self.signers.len() != genesis.validators_committee.len() { + if self.signers.len() != genesis.validators.len() { return Err(Error::BadSignersSet); } // Verify the signers' weight is enough. 
- let weight = genesis.validators_committee.weight(&self.signers); - let threshold = genesis.validators_committee.threshold(); + let weight = genesis.validators.weight(&self.signers); + let threshold = genesis.validators.threshold(); if weight < threshold { return Err(Error::NotEnoughSigners { got: weight, @@ -137,7 +137,7 @@ impl CommitQC { // Now we can verify the signature. let messages_and_keys = genesis - .validators_committee + .validators .keys() .enumerate() .filter(|(i, _)| self.signers.0[*i]) diff --git a/node/libs/roles/src/validator/messages/leader_prepare.rs b/node/libs/roles/src/validator/messages/leader_prepare.rs index 1957257b..306935b4 100644 --- a/node/libs/roles/src/validator/messages/leader_prepare.rs +++ b/node/libs/roles/src/validator/messages/leader_prepare.rs @@ -80,11 +80,11 @@ impl PrepareQC { for (msg, signers) in &self.map { if let Some(v) = &msg.high_vote { *count.entry(v.proposal).or_default() += - genesis.validators_committee.weight(signers); + genesis.validators.weight(signers); } } // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes. 
- let min = 2 * genesis.validators_committee.max_faulty_weight() + 1; + let min = 2 * genesis.validators.max_faulty_weight() + 1; count.into_iter().find(|x| x.1 >= min).map(|x| x.0) } @@ -108,7 +108,7 @@ impl PrepareQC { if msg.msg.view != self.view { return Err(Error::InconsistentViews); } - let Some(i) = genesis.validators_committee.index(&msg.key) else { + let Some(i) = genesis.validators.index(&msg.key) else { return Err(Error::SignerNotInCommittee { signer: Box::new(msg.key.clone()), }); @@ -119,7 +119,7 @@ impl PrepareQC { let e = self .map .entry(msg.msg.clone()) - .or_insert_with(|| Signers::new(genesis.validators_committee.len())); + .or_insert_with(|| Signers::new(genesis.validators.len())); e.0.set(i, true); self.signature.add(&msg.sig); Ok(()) @@ -129,7 +129,7 @@ impl PrepareQC { pub fn verify(&self, genesis: &Genesis) -> Result<(), PrepareQCVerifyError> { use PrepareQCVerifyError as Error; self.view.verify(genesis).map_err(Error::View)?; - let mut sum = Signers::new(genesis.validators_committee.len()); + let mut sum = Signers::new(genesis.validators.len()); // Check the ReplicaPrepare messages. for (i, (msg, signers)) in self.map.iter().enumerate() { @@ -157,8 +157,8 @@ impl PrepareQC { } // Verify the signers' weight is enough. - let weight = genesis.validators_committee.weight(&sum); - let threshold = genesis.validators_committee.threshold(); + let weight = genesis.validators.weight(&sum); + let threshold = genesis.validators.threshold(); if weight < threshold { return Err(Error::NotEnoughSigners { got: weight, @@ -168,7 +168,7 @@ impl PrepareQC { // Now we can verify the signature. 
let messages_and_keys = self.map.clone().into_iter().flat_map(|(msg, signers)| { genesis - .validators_committee + .validators .keys() .enumerate() .filter(|(i, _)| signers.0[*i]) diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index 301f38e5..7b35c61d 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -149,8 +149,8 @@ mod version1 { first_block: BlockNumber(8902834932452), protocol_version: ProtocolVersion(1), - validators_committee: validator_committee(), - attesters_committee: attester_committee(), + validators: validator_committee(), + attesters: attester_committee(), leader_selection: LeaderSelectionMode::Weighted, } .with_hash() diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 801bbd47..1c72855d 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -152,14 +152,14 @@ impl From for Setup { first_block: spec.first_block, protocol_version: spec.protocol_version, - validators_committee: Committee::new(spec.validator_weights.iter().map( + validators: Committee::new(spec.validator_weights.iter().map( |(k, w)| WeightedValidator { key: k.public(), weight: *w, }, )) .unwrap(), - attesters_committee: attester::Committee::new(spec.attester_weights.iter().map( + attesters: attester::Committee::new(spec.attester_weights.iter().map( |(k, w)| WeightedAttester { key: k.public(), weight: *w, @@ -282,8 +282,8 @@ impl Distribution for Standard { first_block: rng.gen(), protocol_version: rng.gen(), - validators_committee: rng.gen(), - attesters_committee: rng.gen(), + validators: rng.gen(), + attesters: rng.gen(), leader_selection: rng.gen(), } } diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index f65c10b9..57f22614 100644 --- a/node/libs/roles/src/validator/tests.rs +++ 
b/node/libs/roles/src/validator/tests.rs @@ -220,10 +220,10 @@ fn test_commit_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.validators_committee = - Committee::new(setup1.genesis.validators_committee.iter().take(3).cloned()).unwrap(); + genesis3.validators = + Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(); let genesis3 = genesis3.with_hash(); - let validator_weight = setup1.genesis.validators_committee.total_weight() / 6; + let validator_weight = setup1.genesis.validators.total_weight() / 6; for i in 0..setup1.validator_keys.len() + 1 { let view = rng.gen(); @@ -233,7 +233,7 @@ fn test_commit_qc() { .unwrap(); } let expected_weight = i as u64 * validator_weight; - if expected_weight >= setup1.genesis.validators_committee.threshold() { + if expected_weight >= setup1.genesis.validators.threshold() { qc.verify(&setup1.genesis).unwrap(); } else { assert_matches!( @@ -309,8 +309,8 @@ fn test_prepare_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.validators_committee = - Committee::new(setup1.genesis.validators_committee.iter().take(3).cloned()).unwrap(); + genesis3.validators = + Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(); let genesis3 = genesis3.with_hash(); let view: ViewNumber = rng.gen(); @@ -327,8 +327,8 @@ fn test_prepare_qc() { ) .unwrap(); } - let expected_weight = n as u64 * setup1.genesis.validators_committee.total_weight() / 6; - if expected_weight >= setup1.genesis.validators_committee.threshold() { + let expected_weight = n as u64 * setup1.genesis.validators.total_weight() / 6; + if expected_weight >= setup1.genesis.validators.threshold() { qc.verify(&setup1.genesis).unwrap(); } else { assert_matches!( @@ -423,7 +423,7 @@ fn test_validator_committee_weights() { let key = &setup.validator_keys[n]; qc.add(&key.sign_msg(msg.clone()), 
&setup.genesis).unwrap(); let signers = &qc.map[&msg]; - assert_eq!(setup.genesis.validators_committee.weight(signers), *weight); + assert_eq!(setup.genesis.validators.weight(signers), *weight); } } diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index b7cef636..2f35d017 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -12,10 +12,9 @@ impl Distribution for EncodeDist { let mut genesis: validator::GenesisRaw = rng.gen(); // In order for the genesis to be valid, the sticky leader needs to be in the validator committee. if let LeaderSelectionMode::Sticky(_) = genesis.leader_selection { - let i = rng.gen_range(0..genesis.validators_committee.len()); - genesis.leader_selection = LeaderSelectionMode::Sticky( - genesis.validators_committee.get(i).unwrap().key.clone(), - ); + let i = rng.gen_range(0..genesis.validators.len()); + genesis.leader_selection = + LeaderSelectionMode::Sticky(genesis.validators.get(i).unwrap().key.clone()); } AppConfig { server_addr: self.sample(rng), From 4f548d7b5f3f92586377d575dfb536e4491a5ad6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 11:25:03 -0300 Subject: [PATCH 38/79] Fix some crate imports --- node/actors/executor/src/lib.rs | 19 ++----------------- node/actors/network/src/consensus/mod.rs | 16 +++------------- node/actors/network/src/gossip/mod.rs | 2 +- node/actors/network/src/gossip/runner.rs | 5 +---- node/actors/network/src/gossip/testonly.rs | 2 +- node/actors/network/src/gossip/tests/mod.rs | 2 +- node/actors/network/src/rpc/push_signature.rs | 2 +- node/deny.toml | 2 +- node/libs/roles/src/attester/conv.rs | 2 +- node/libs/roles/src/attester/messages/msg.rs | 2 +- node/libs/roles/src/validator/messages/msg.rs | 5 +---- 11 files changed, 14 insertions(+), 45 deletions(-) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 9c8b3951..39ff351d 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -3,7 +3,6 @@ use 
crate::io::Dispatcher; use anyhow::Context as _; use std::{ collections::{HashMap, HashSet}, - fmt, sync::Arc, }; use zksync_concurrency::{ctx, limiter, net, scope, time}; @@ -19,6 +18,7 @@ mod io; mod tests; /// Validator-related part of [`Executor`]. +#[derive(Debug)] pub struct Validator { /// Consensus network configuration. pub key: validator::SecretKey, @@ -29,27 +29,12 @@ pub struct Validator { } /// Validator-related part of [`Executor`]. +#[derive(Debug)] pub struct Attester { /// Consensus network configuration. pub key: attester::SecretKey, } -impl fmt::Debug for Validator { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("ValidatorExecutor") - .field("key", &self.key) - .finish() - } -} - -impl fmt::Debug for Attester { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("AttesterExecutor") - .field("key", &self.key) - .finish() - } -} - /// Config of the node executor. #[derive(Clone, Debug)] pub struct Config { diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 789c2f63..f3ded417 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -1,11 +1,6 @@ //! Consensus network is a full graph of connections between all validators. //! BFT consensus messages are exchanged over this network. -use crate::{ - config, gossip, io, noise, - pool::PoolWatch, - preface, - rpc::{self}, -}; +use crate::{config, gossip, io, noise, pool::PoolWatch, preface, rpc}; use anyhow::Context as _; use rand::seq::SliceRandom; use std::{ @@ -14,7 +9,7 @@ use std::{ }; use tracing::Instrument as _; use zksync_concurrency::{ctx, oneshot, scope, sync, time}; -use zksync_consensus_roles::validator::{self}; +use zksync_consensus_roles::validator; use zksync_protobuf::kB; mod handshake; @@ -156,12 +151,7 @@ impl Network { /// Constructs a new consensus network state. 
pub(crate) fn new(gossip: Arc) -> Option> { let key = gossip.cfg.validator_key.clone()?; - let validators: HashSet<_> = gossip - .genesis() - .validators - .keys() - .cloned() - .collect(); + let validators: HashSet<_> = gossip.genesis().validators.keys().cloned().collect(); Some(Arc::new(Self { key, inbound: PoolWatch::new(validators.clone(), 0), diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index e4e8b982..0df2965b 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -13,7 +13,7 @@ //! eclipse attack. Dynamic connections are supposed to improve the properties of the gossip //! network graph (minimize its diameter, increase connectedness). use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; -use anyhow::Context; +use anyhow::Context as _; use im::HashMap; use std::{ borrow::Borrow, diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index ebe66e86..0858a1f0 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -1,8 +1,5 @@ use super::{batch_signatures::L1BatchSignatures, handshake, Network, ValidatorAddrs}; -use crate::{ - noise, preface, - rpc::{self}, -}; +use crate::{noise, preface, rpc}; use anyhow::Context as _; use async_trait::async_trait; use rand::seq::SliceRandom; diff --git a/node/actors/network/src/gossip/testonly.rs b/node/actors/network/src/gossip/testonly.rs index d517165c..fcf8bee4 100644 --- a/node/actors/network/src/gossip/testonly.rs +++ b/node/actors/network/src/gossip/testonly.rs @@ -1,7 +1,7 @@ #![allow(dead_code)] use super::*; use crate::{frame, mux, noise, preface, rpc, Config, GossipConfig}; -use anyhow::Context; +use anyhow::Context as _; use rand::Rng as _; use std::collections::BTreeMap; use zksync_concurrency::{ctx, limiter}; diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 
4f852655..eea98537 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -1,6 +1,6 @@ use super::*; use crate::{metrics, preface, rpc, testonly}; -use anyhow::Context; +use anyhow::Context as _; use assert_matches::assert_matches; use pretty_assertions::assert_eq; use rand::Rng; diff --git a/node/actors/network/src/rpc/push_signature.rs b/node/actors/network/src/rpc/push_signature.rs index ab0c077f..ab5fc6a0 100644 --- a/node/actors/network/src/rpc/push_signature.rs +++ b/node/actors/network/src/rpc/push_signature.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use crate::{mux, proto::gossip as proto}; -use anyhow::Context; +use anyhow::Context as _; use zksync_consensus_roles::attester::{self, L1Batch}; use zksync_protobuf::ProtoFmt; diff --git a/node/deny.toml b/node/deny.toml index 750aceb8..16a22ab5 100644 --- a/node/deny.toml +++ b/node/deny.toml @@ -76,7 +76,7 @@ skip = [ # Old versions required by kube. { name = "strsim", version = "0.10.0" }, - + # Old versions required by k8s-openapi. 
{ name = "base64", version = "0.21.7" } ] diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index e573f69e..c6971e3c 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -1,5 +1,5 @@ use crate::proto::attester::{self as proto}; -use anyhow::Context; +use anyhow::Context as _; use zksync_consensus_crypto::ByteFmt; use zksync_consensus_utils::enum_util::Variant; use zksync_protobuf::{read_required, required, ProtoFmt}; diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index f4f5c0a2..d729e69c 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -4,7 +4,7 @@ use crate::{ attester::{L1Batch, PublicKey, Signature}, validator::ViewNumber, }; -use anyhow::Context; +use anyhow::Context as _; use bit_vec::BitVec; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; diff --git a/node/libs/roles/src/validator/messages/msg.rs b/node/libs/roles/src/validator/messages/msg.rs index 9e1920f2..cc5a047e 100644 --- a/node/libs/roles/src/validator/messages/msg.rs +++ b/node/libs/roles/src/validator/messages/msg.rs @@ -1,9 +1,6 @@ //! Generic message types. 
use super::{ConsensusMsg, NetAddress}; -use crate::{ - node::SessionId, - validator::{self}, -}; +use crate::{node::SessionId, validator}; use std::fmt; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; From 5cf4fae3174487b6328f7c3810440739e1261304 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 13:17:34 -0300 Subject: [PATCH 39/79] Update limit rate for rpc requests with batch signatures --- node/actors/network/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index e7c21cf9..63456582 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -45,8 +45,8 @@ impl Default for RpcConfig { refresh: time::Duration::ZERO, }, push_l1_batch_signature_rate: limiter::Rate { - burst: 10, - refresh: time::Duration::ZERO, + burst: 2, + refresh: time::Duration::milliseconds(500), }, } } From 03780cb89c822e805a77790268a0e1c49d04eab2 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 13:42:44 -0300 Subject: [PATCH 40/79] Update name for signed messages in proto file --- node/libs/roles/src/attester/conv.rs | 2 +- node/libs/roles/src/proto/attester.proto | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index c6971e3c..90b9065c 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -26,7 +26,7 @@ impl ProtoFmt for L1Batch { } impl + Clone> ProtoFmt for SignedBatchMsg { - type Proto = proto::SignedBatch; + type Proto = proto::Signed; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { msg: V::extract(read_required::(&r.msg).context("msg")?)?, diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto index 8276c6a4..5a53cfd6 100644 --- 
a/node/libs/roles/src/proto/attester.proto +++ b/node/libs/roles/src/proto/attester.proto @@ -21,7 +21,7 @@ message Msg { } } -message SignedBatch { +message Signed { optional Msg msg = 1; // required optional PublicKey key = 2; // required optional Signature sig = 3; // required From 5392a1c67a7580de51cdb5bdc246c8e9454603c8 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 13:55:30 -0300 Subject: [PATCH 41/79] Update attester field in genesis for proto struct --- node/libs/roles/src/proto/validator.proto | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 97a19520..f280bf9c 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -11,13 +11,13 @@ message Genesis { optional uint64 chain_id = 5; // required optional uint64 fork_number = 6; // required; ForkNumber optional uint64 first_block = 7; // required; BlockNumber - repeated attester.WeightedAttester attesters = 4; // These properties are expected to be overwritten each epoch. // We will either remove them entirely, or keep them for the initial epoch. 
optional uint32 protocol_version = 8; // required; ProtocolVersion repeated WeightedValidator validators_v1 = 3; - optional LeaderSelectionMode leader_selection = 9; // required + repeated attester.WeightedAttester attesters = 9; + optional LeaderSelectionMode leader_selection = 4; // required } message LeaderSelectionMode { From e0f16de9f461bdc7056926afc3d626ecff6239c9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 14:02:00 -0300 Subject: [PATCH 42/79] Leave TODO comment for hash of the L1 Batch --- node/libs/roles/src/attester/messages/l1_batch.rs | 1 + node/libs/roles/src/proto/attester.proto | 1 + 2 files changed, 2 insertions(+) diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/l1_batch.rs index 7c4b20b2..d64f3bd4 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/l1_batch.rs @@ -26,6 +26,7 @@ pub struct L1Batch { pub number: BatchNumber, /// Time at which this message has been signed. pub timestamp: time::Utc, + // TODO: add the hash of the L1 batch as a field } /// A certificate for a batch of L2 blocks to be sent to L1. 
diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto index 5a53cfd6..2b3f814c 100644 --- a/node/libs/roles/src/proto/attester.proto +++ b/node/libs/roles/src/proto/attester.proto @@ -7,6 +7,7 @@ import "zksync/std.proto"; message L1Batch { optional uint64 number = 1; // required optional std.Timestamp timestamp = 2; // required + // TODO: add the hash of the L1 batch as a field } message L1BatchQC { From 073c22d2bdb595993258cbbf77b022d3e6365e93 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 14:27:50 -0300 Subject: [PATCH 43/79] Update network proto files to use renamed attester Signed message --- node/actors/network/src/proto/gossip.proto | 2 +- node/actors/network/src/proto/signature.proto | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index 4c306e68..9a0a1f5b 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -20,7 +20,7 @@ message PushValidatorAddrs { message PushSignature { // Signed roles.validator.Msg.net_address. - repeated roles.attester.SignedBatch signatures = 1; + repeated roles.attester.Signed signatures = 1; } // State of the local block store. 
diff --git a/node/actors/network/src/proto/signature.proto b/node/actors/network/src/proto/signature.proto index fcf6bd7d..3b0e842e 100644 --- a/node/actors/network/src/proto/signature.proto +++ b/node/actors/network/src/proto/signature.proto @@ -6,7 +6,7 @@ import "zksync/roles/attester.proto"; import "zksync/std.proto"; message SignatureReq { - optional roles.attester.SignedBatch msg = 1; + optional roles.attester.Signed msg = 1; } message SignatureResp {} From 93b59be5a3eb831e07317c98009502b2a2ffa0b8 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 14:43:29 -0300 Subject: [PATCH 44/79] Delete unused code related to attesters and l1 batches --- node/actors/executor/src/lib.rs | 3 --- node/actors/executor/src/tests.rs | 2 -- node/actors/network/src/config.rs | 3 --- node/actors/network/src/gossip/mod.rs | 6 ------ node/actors/network/src/lib.rs | 6 +----- node/actors/network/src/testonly.rs | 9 +-------- node/tools/src/config.rs | 1 - 7 files changed, 2 insertions(+), 28 deletions(-) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 39ff351d..04e16a4b 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -80,8 +80,6 @@ pub struct Executor { pub block_store: Arc, /// Validator-specific node data. pub validator: Option, /// Attester-specific node data. 
- pub attester: Option, } impl Executor { @@ -92,7 +90,6 @@ impl Executor { public_addr: self.config.public_addr.clone(), gossip: self.config.gossip(), validator_key: self.validator.as_ref().map(|v| v.key.clone()), - attester_key: self.attester.as_ref().map(|v| v.key.clone()), ping_timeout: Some(time::Duration::seconds(10)), max_block_size: self.config.max_payload_size.saturating_add(kB), max_block_queue_size: 20, diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index 855fbd79..9b8c9a1b 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -35,7 +35,6 @@ fn validator( replica_store: Box::new(replica_store), payload_manager: Box::new(bft::testonly::RandomPayload(1000)), }), - attester: None, } } @@ -44,7 +43,6 @@ fn fullnode(cfg: &network::Config, block_store: Arc) -> Executor { config: config(cfg), block_store, validator: None, - attester: None, } } diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index 63456582..e7d4418d 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -83,9 +83,6 @@ pub struct Config { /// Private key of the validator. /// None if the node is NOT a validator. pub validator_key: Option, - /// Private key of the attester. - /// None if the node is NOT an attester. - pub attester_key: Option, /// Maximal size of the proto-encoded `validator::FinalBlock` in bytes. pub max_block_size: usize, /// If a peer doesn't respond to a ping message within `ping_timeout`, diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 0df2965b..7bd77cbd 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -57,12 +57,8 @@ pub(crate) struct Network { pub(crate) sender: channel::UnboundedSender, /// Queue of block fetching requests. pub(crate) fetch_queue: fetch::Queue, - /// L1 batches. 
- // pub(crate) l1_batches: sync::watch::Receiver>, /// Last viewed QC. pub(crate) last_viewed_qc: Option, - /// Attester SecretKey, None if the node is not an attester. - pub(crate) attester_key: Option, /// L1 batch qc. pub(crate) l1_batch_qc: HashMap, /// TESTONLY: how many time push_validator_addrs rpc was called by the peers. @@ -85,9 +81,7 @@ impl Network { outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), validator_addrs: ValidatorAddrsWatch::default(), batch_signatures: L1BatchSignaturesWatch::default(), - attester_key: cfg.attester_key.clone(), l1_batch_qc: HashMap::new(), - // l1_batches: sync::watch::channel(None).1, last_viewed_qc: None, cfg, fetch_queue: fetch::Queue::default(), diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index cb06c6f5..54e81385 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -161,11 +161,7 @@ impl Runner { } } - if let Some(_c) = &self.net.gossip.attester_key { - // TODO: check for batches to sign - // Sign batches - // Broadcast signature - } + // TODO: check if we are active attester to get new L1 Batches, sign them and broadcast the signature let accept_limiter = limiter::Limiter::new(ctx, self.net.gossip.cfg.tcp_accept_rate); loop { diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index fd8315c3..c0e3c0ae 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -90,7 +90,6 @@ pub fn new_configs( // due to timeouts. ping_timeout: None, validator_key: Some(validator_key.clone()), - attester_key: Some(attester_key.clone()), gossip: GossipConfig { key: rng.gen(), dynamic_inbound_limit: usize::MAX, @@ -128,7 +127,6 @@ pub fn new_fullnode(rng: &mut impl Rng, peer: &Config) -> Config { // due to timeouts. 
ping_timeout: None, validator_key: None, - attester_key: None, gossip: GossipConfig { key: rng.gen(), dynamic_inbound_limit: usize::MAX, @@ -226,12 +224,7 @@ impl Instance { pub async fn wait_for_consensus_connections(&self) { let consensus_state = self.net.consensus.as_ref().unwrap(); - let want: HashSet<_> = self - .genesis() - .validators - .keys() - .cloned() - .collect(); + let want: HashSet<_> = self.genesis().validators.keys().cloned().collect(); consensus_state .inbound .subscribe() diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index c763b4f5..0f950ef4 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -214,7 +214,6 @@ impl Configs { self.app.max_payload_size, )), }), - attester: None, }; Ok((e, runner)) } From a9b5a5a57c746417e8e03d9d471f9a9cdf8a32bc Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 16:14:56 -0300 Subject: [PATCH 45/79] Use qualified names for attester imports --- node/actors/network/src/gossip/tests/mod.rs | 32 +++++++++++---------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index eea98537..709db19d 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -14,10 +14,7 @@ use zksync_concurrency::{ testonly::{abort_on_panic, set_timeout}, time, }; -use zksync_consensus_roles::{ - attester::{L1Batch, SignedBatchMsg}, - validator, -}; +use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::testonly::new_store; mod fetch; @@ -90,7 +87,9 @@ fn mk_version(rng: &mut R) -> u64 { struct View(im::HashMap>>); #[derive(Default)] -struct Signatures(im::HashMap>>); +struct Signatures( + im::HashMap>>, +); fn mk_netaddr( key: &validator::SecretKey, @@ -109,8 +108,8 @@ fn mk_batch( key: &attester::SecretKey, number: BatchNumber, timestamp: time::Utc, -) -> SignedBatchMsg { - key.sign_batch_msg(L1Batch { number, 
timestamp }) +) -> attester::SignedBatchMsg { + key.sign_batch_msg(attester::L1Batch { number, timestamp }) } fn random_netaddr( @@ -128,8 +127,8 @@ fn random_netaddr( fn random_signature( rng: &mut R, key: &attester::SecretKey, -) -> Arc> { - let batch = L1Batch { +) -> Arc> { + let batch = attester::L1Batch { timestamp: mk_timestamp(rng), number: BatchNumber(rng.gen_range(0..1000)), }; @@ -153,12 +152,12 @@ fn update_netaddr( fn update_signature( _rng: &mut R, - batch: &L1Batch, + batch: &attester::L1Batch, key: &attester::SecretKey, batch_number_diff: i64, timestamp_diff: time::Duration, -) -> Arc> { - let batch = L1Batch { +) -> Arc> { + let batch = attester::L1Batch { timestamp: batch.timestamp + timestamp_diff, number: BatchNumber((batch.number.0 as i64 + batch_number_diff) as u64), }; @@ -180,15 +179,18 @@ impl View { } impl Signatures { - fn insert(&mut self, entry: Arc>) { + fn insert(&mut self, entry: Arc>) { self.0.insert(entry.key.clone(), entry); } - fn get(&mut self, key: &attester::SecretKey) -> Arc> { + fn get( + &mut self, + key: &attester::SecretKey, + ) -> Arc> { self.0.get(&key.public()).unwrap().clone() } - fn as_vec(&self) -> Vec>> { + fn as_vec(&self) -> Vec>> { self.0.values().cloned().collect() } } From 5c0e7b77be87699baa58c7c5b1ce368eeb72f5d9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 17:38:52 -0300 Subject: [PATCH 46/79] Renamed most of batch related structs for consistency --- node/actors/network/src/config.rs | 6 +-- .../network/src/gossip/batch_signatures.rs | 34 ++++++------- node/actors/network/src/gossip/mod.rs | 32 +++++------- node/actors/network/src/gossip/runner.rs | 24 +++++---- node/actors/network/src/gossip/tests/mod.rs | 49 +++++++++---------- node/actors/network/src/lib.rs | 2 +- node/actors/network/src/proto/gossip.proto | 2 +- node/actors/network/src/proto/signature.proto | 12 ----- node/actors/network/src/rpc/mod.rs | 2 +- ...h_signature.rs => push_batch_signature.rs} | 6 +-- 
node/actors/network/src/testonly.rs | 46 ++++++++--------- node/libs/roles/src/attester/conv.rs | 18 +++---- .../src/attester/keys/aggregate_signature.rs | 4 +- .../roles/src/attester/keys/secret_key.rs | 6 +-- .../messages/{l1_batch.rs => batch.rs} | 28 +++++------ node/libs/roles/src/attester/messages/mod.rs | 4 +- node/libs/roles/src/attester/messages/msg.rs | 20 ++++---- node/libs/roles/src/attester/testonly.rs | 24 ++++----- node/libs/roles/src/attester/tests.rs | 22 ++++----- node/libs/roles/src/proto/attester.proto | 8 +-- node/libs/roles/src/validator/testonly.rs | 28 +++++------ 21 files changed, 177 insertions(+), 200 deletions(-) delete mode 100644 node/actors/network/src/proto/signature.proto rename node/actors/network/src/rpc/{push_signature.rs => push_batch_signature.rs} (85%) rename node/libs/roles/src/attester/messages/{l1_batch.rs => batch.rs} (83%) diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index e7d4418d..30326c0b 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -1,7 +1,7 @@ //! Network actor configs. use std::collections::{HashMap, HashSet}; use zksync_concurrency::{limiter, net, time}; -use zksync_consensus_roles::{attester, node, validator}; +use zksync_consensus_roles::{node, validator}; /// How often we should retry to establish a connection to a validator. /// TODO(gprusak): once it becomes relevant, choose a more appropriate retry strategy. @@ -21,7 +21,7 @@ pub struct RpcConfig { /// Max rate of sending/receiving consensus messages. pub consensus_rate: limiter::Rate, /// Max rate of sending/receiving l1 batch signature messages. 
- pub push_l1_batch_signature_rate: limiter::Rate, + pub push_batch_signature_rate: limiter::Rate, } impl Default for RpcConfig { @@ -44,7 +44,7 @@ impl Default for RpcConfig { burst: 10, refresh: time::Duration::ZERO, }, - push_l1_batch_signature_rate: limiter::Rate { + push_batch_signature_rate: limiter::Rate { burst: 2, refresh: time::Duration::milliseconds(500), }, diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_signatures.rs index 63f1fbe9..30475b5e 100644 --- a/node/actors/network/src/gossip/batch_signatures.rs +++ b/node/actors/network/src/gossip/batch_signatures.rs @@ -2,18 +2,18 @@ use crate::watch::Watch; use std::{collections::HashSet, sync::Arc}; use zksync_concurrency::sync; -use zksync_consensus_roles::attester::{self, L1Batch}; +use zksync_consensus_roles::attester::{self, Batch}; -/// Mapping from attester::PublicKey to a signed attester::L1Batch message. +/// Mapping from attester::PublicKey to a signed attester::Batch message. /// Represents the currents state of node's knowledge about the attester signatures. #[derive(Clone, Default, PartialEq, Eq)] -pub(crate) struct L1BatchSignatures( - pub(super) im::HashMap>>, +pub(crate) struct BatchSignatures( + pub(super) im::HashMap>>, ); -impl L1BatchSignatures { +impl BatchSignatures { /// Returns a set of entries of `self` which are newer than the entries in `b`. - pub(super) fn get_newer(&self, b: &Self) -> Vec>> { + pub(super) fn get_newer(&self, b: &Self) -> Vec>> { let mut newer = vec![]; for (k, v) in &self.0 { if let Some(bv) = b.0.get(k) { @@ -34,7 +34,7 @@ impl L1BatchSignatures { pub(super) fn update( &mut self, attesters: &attester::Committee, - data: &[Arc>], + data: &[Arc>], ) -> anyhow::Result { let mut changed = false; @@ -66,23 +66,23 @@ impl L1BatchSignatures { } } -/// Watch wrapper of L1BatchSignatures, -/// which supports subscribing to L1BatchSignatures updates. 
-pub(crate) struct L1BatchSignaturesWatch(Watch); +/// Watch wrapper of BatchSignatures, +/// which supports subscribing to BatchSignatures updates. +pub(crate) struct BatchSignaturesWatch(Watch); -impl Default for L1BatchSignaturesWatch { +impl Default for BatchSignaturesWatch { fn default() -> Self { - Self(Watch::new(L1BatchSignatures::default())) + Self(Watch::new(BatchSignatures::default())) } } -impl L1BatchSignaturesWatch { - /// Subscribes to L1BatchSignatures updates. - pub(crate) fn subscribe(&self) -> sync::watch::Receiver { +impl BatchSignaturesWatch { + /// Subscribes to BatchSignatures updates. + pub(crate) fn subscribe(&self) -> sync::watch::Receiver { self.0.subscribe() } - /// Inserts data to L1BatchSignatures. + /// Inserts data to BatchSignatures. /// Subscribers are notified iff at least 1 new entry has /// been inserted. Returns an error iff an invalid /// entry in `data` has been found. The provider of the @@ -90,7 +90,7 @@ impl L1BatchSignaturesWatch { pub(crate) async fn update( &self, attesters: &attester::Committee, - data: &[Arc>], + data: &[Arc>], ) -> anyhow::Result<()> { let this = self.0.lock().await; let mut signatures = this.borrow().clone(); diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 7bd77cbd..ba509aab 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -15,19 +15,13 @@ use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; use anyhow::Context as _; use im::HashMap; -use std::{ - borrow::Borrow, - sync::{atomic::AtomicUsize, Arc}, -}; +use std::sync::{atomic::AtomicUsize, Arc}; pub(crate) use validator_addrs::*; use zksync_concurrency::{ctx, ctx::channel, scope, sync}; -use zksync_consensus_roles::{ - attester::{self, BatchNumber, L1BatchQC}, - node, validator, -}; +use zksync_consensus_roles::{attester, node, validator}; use zksync_consensus_storage::BlockStore; -use self::batch_signatures::L1BatchSignaturesWatch; 
+use self::batch_signatures::BatchSignaturesWatch; mod batch_signatures; mod fetch; @@ -50,7 +44,7 @@ pub(crate) struct Network { /// Current state of knowledge about validators' endpoints. pub(crate) validator_addrs: ValidatorAddrsWatch, /// Current state of knowledge about batch signatures. - pub(crate) batch_signatures: L1BatchSignaturesWatch, + pub(crate) batch_signatures: BatchSignaturesWatch, /// Block store to serve `get_block` requests from. pub(crate) block_store: Arc, /// Output pipe of the network actor. @@ -58,9 +52,9 @@ pub(crate) struct Network { /// Queue of block fetching requests. pub(crate) fetch_queue: fetch::Queue, /// Last viewed QC. - pub(crate) last_viewed_qc: Option, + pub(crate) last_viewed_qc: Option, /// L1 batch qc. - pub(crate) l1_batch_qc: HashMap, + pub(crate) batch_qc: HashMap, /// TESTONLY: how many time push_validator_addrs rpc was called by the peers. pub(crate) push_validator_addrs_calls: AtomicUsize, } @@ -80,8 +74,8 @@ impl Network { ), outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), validator_addrs: ValidatorAddrsWatch::default(), - batch_signatures: L1BatchSignaturesWatch::default(), - l1_batch_qc: HashMap::new(), + batch_signatures: BatchSignaturesWatch::default(), + batch_qc: HashMap::new(), last_viewed_qc: None, cfg, fetch_queue: fetch::Queue::default(), @@ -136,17 +130,17 @@ impl Network { .context("batch signatures")? .clone(); for (_, sig) in signatures.0 { - self.l1_batch_qc + self.batch_qc .clone() .entry(sig.msg.number.clone()) - .or_insert_with(|| L1BatchQC::new(sig.msg.clone(), self.genesis())) + .or_insert_with(|| attester::BatchQC::new(sig.msg.clone(), self.genesis())) .add(&sig, self.genesis()); } // Now we check if we have enough weight to continue. if let Some(last_qc) = self.last_viewed_qc.clone() { let weight = self.genesis().attesters.weight( &self - .l1_batch_qc + .batch_qc .get(&last_qc.message.number.next_batch_number()) .context("last qc")? 
.signers, @@ -157,8 +151,8 @@ impl Network { } else { let weight = self.genesis().attesters.weight( &self - .l1_batch_qc - .get(&BatchNumber(0)) + .batch_qc + .get(&attester::BatchNumber(0)) .context("L1 batch QC")? .signers, ); diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 0858a1f0..6508bd12 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -1,4 +1,4 @@ -use super::{batch_signatures::L1BatchSignatures, handshake, Network, ValidatorAddrs}; +use super::{batch_signatures::BatchSignatures, handshake, Network, ValidatorAddrs}; use crate::{noise, preface, rpc}; use anyhow::Context as _; use async_trait::async_trait; @@ -32,16 +32,20 @@ impl rpc::Handler for PushValidatorAddrsServer<' } } -struct L1BatchServer<'a>(&'a Network); +struct BatchServer<'a>(&'a Network); #[async_trait::async_trait] -impl rpc::Handler for L1BatchServer<'_> { +impl rpc::Handler for BatchServer<'_> { /// Here we bound the buffering of incoming consensus messages. 
fn max_req_size(&self) -> usize { 100 * kB } - async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::push_signature::Req) -> anyhow::Result<()> { + async fn handle( + &self, + _ctx: &ctx::Ctx, + req: rpc::push_batch_signature::Req, + ) -> anyhow::Result<()> { self.0 .batch_signatures .update(&self.0.genesis().attesters, &req.0) @@ -110,11 +114,11 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); - let push_signature_client = rpc::Client::::new( + let push_signature_client = rpc::Client::::new( ctx, - self.cfg.rpc.push_l1_batch_signature_rate, + self.cfg.rpc.push_batch_signature_rate, ); - let push_signature_server = L1BatchServer(self); + let push_signature_server = BatchServer(self); let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); @@ -130,7 +134,7 @@ impl Network { .add_server( ctx, push_signature_server, - self.cfg.rpc.push_l1_batch_signature_rate, + self.cfg.rpc.push_batch_signature_rate, ) .add_client(&push_block_store_state_client) .add_server( @@ -183,7 +187,7 @@ impl Network { // Push L1 batch signatures updates to peer. 
s.spawn::<()>(async { - let mut old = L1BatchSignatures::default(); + let mut old = BatchSignatures::default(); let mut sub = self.batch_signatures.subscribe(); sub.mark_changed(); loop { @@ -193,7 +197,7 @@ impl Network { continue; } old = new; - let req = rpc::push_signature::Req(diff); + let req = rpc::push_batch_signature::Req(diff); push_signature_client.call(ctx, &req, kB).await?; } }); diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 709db19d..9bb25fef 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -87,9 +87,7 @@ fn mk_version(rng: &mut R) -> u64 { struct View(im::HashMap>>); #[derive(Default)] -struct Signatures( - im::HashMap>>, -); +struct Signatures(im::HashMap>>); fn mk_netaddr( key: &validator::SecretKey, @@ -106,10 +104,10 @@ fn mk_netaddr( fn mk_batch( key: &attester::SecretKey, - number: BatchNumber, + number: attester::BatchNumber, timestamp: time::Utc, -) -> attester::SignedBatchMsg { - key.sign_batch_msg(attester::L1Batch { number, timestamp }) +) -> attester::Signed { + key.sign_msg(attester::Batch { number, timestamp }) } fn random_netaddr( @@ -124,15 +122,15 @@ fn random_netaddr( )) } -fn random_signature( +fn random_batch_signature( rng: &mut R, key: &attester::SecretKey, -) -> Arc> { - let batch = attester::L1Batch { +) -> Arc> { + let batch = attester::Batch { timestamp: mk_timestamp(rng), - number: BatchNumber(rng.gen_range(0..1000)), + number: attester::BatchNumber(rng.gen_range(0..1000)), }; - Arc::new(key.sign_batch_msg(batch.to_owned())) + Arc::new(key.sign_msg(batch.to_owned())) } fn update_netaddr( @@ -152,16 +150,16 @@ fn update_netaddr( fn update_signature( _rng: &mut R, - batch: &attester::L1Batch, + batch: &attester::Batch, key: &attester::SecretKey, batch_number_diff: i64, timestamp_diff: time::Duration, -) -> Arc> { - let batch = attester::L1Batch { +) -> Arc> { + let batch = attester::Batch { timestamp: 
batch.timestamp + timestamp_diff, - number: BatchNumber((batch.number.0 as i64 + batch_number_diff) as u64), + number: attester::BatchNumber((batch.number.0 as i64 + batch_number_diff) as u64), }; - Arc::new(key.sign_batch_msg(batch.to_owned())) + Arc::new(key.sign_msg(batch.to_owned())) } impl View { @@ -179,18 +177,15 @@ impl View { } impl Signatures { - fn insert(&mut self, entry: Arc>) { + fn insert(&mut self, entry: Arc>) { self.0.insert(entry.key.clone(), entry); } - fn get( - &mut self, - key: &attester::SecretKey, - ) -> Arc> { + fn get(&mut self, key: &attester::SecretKey) -> Arc> { self.0.get(&key.public()).unwrap().clone() } - fn as_vec(&self) -> Vec>> { + fn as_vec(&self) -> Vec>> { self.0.values().cloned().collect() } } @@ -532,13 +527,13 @@ async fn test_batch_signatures() { weight: 1250, })) .unwrap(); - let signatures = L1BatchSignaturesWatch::default(); + let signatures = BatchSignaturesWatch::default(); let mut sub = signatures.subscribe(); // Initial values. let mut want = Signatures::default(); for k in &keys[0..6] { - want.insert(random_signature(rng, k)); + want.insert(random_batch_signature(rng, k)); } signatures.update(&attesters, &want.as_vec()).await.unwrap(); assert_eq!(want.0, sub.borrow_and_update().0); @@ -562,10 +557,10 @@ async fn test_batch_signatures() { // older batch number let k4v2 = update_signature(rng, &want.get(&keys[4]).msg, &keys[4], -1, delta); // first entry for a key in the config - let k6v1 = random_signature(rng, &keys[6]); + let k6v1 = random_batch_signature(rng, &keys[6]); // entry for a key outside of the config let k8 = rng.gen(); - let k8v1 = random_signature(rng, &k8); + let k8v1 = random_batch_signature(rng, &k8); want.insert(k0v2.clone()); want.insert(k1v2.clone()); @@ -587,7 +582,7 @@ async fn test_batch_signatures() { // Invalid signature. 
let mut k0v3 = mk_batch( &keys[1], - BatchNumber(rng.gen_range(0..1000)), + attester::BatchNumber(rng.gen_range(0..1000)), time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), ); k0v3.key = keys[0].public(); diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 54e81385..bcbb5dce 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -124,7 +124,7 @@ impl Runner { // Update QC batches in the background. s.spawn(async { - self.net.gossip.update_batch_qc(ctx).await; + let _ = self.net.gossip.update_batch_qc(ctx).await; Ok(()) }); diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index 9a0a1f5b..42d427ba 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -18,7 +18,7 @@ message PushValidatorAddrs { repeated roles.validator.Signed net_addresses = 1; } -message PushSignature { +message PushBatchSignature { // Signed roles.validator.Msg.net_address. 
repeated roles.attester.Signed signatures = 1; } diff --git a/node/actors/network/src/proto/signature.proto b/node/actors/network/src/proto/signature.proto deleted file mode 100644 index 3b0e842e..00000000 --- a/node/actors/network/src/proto/signature.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -package zksync.network.consensus; - -import "zksync/roles/attester.proto"; -import "zksync/std.proto"; - -message SignatureReq { - optional roles.attester.Signed msg = 1; -} - -message SignatureResp {} diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs index e30c4808..8b17ca53 100644 --- a/node/actors/network/src/rpc/mod.rs +++ b/node/actors/network/src/rpc/mod.rs @@ -25,8 +25,8 @@ pub(crate) mod consensus; pub(crate) mod get_block; mod metrics; pub(crate) mod ping; +pub(crate) mod push_batch_signature; pub(crate) mod push_block_store_state; -pub(crate) mod push_signature; pub(crate) mod push_validator_addrs; #[cfg(test)] pub(crate) mod testonly; diff --git a/node/actors/network/src/rpc/push_signature.rs b/node/actors/network/src/rpc/push_batch_signature.rs similarity index 85% rename from node/actors/network/src/rpc/push_signature.rs rename to node/actors/network/src/rpc/push_batch_signature.rs index ab5fc6a0..31be379e 100644 --- a/node/actors/network/src/rpc/push_signature.rs +++ b/node/actors/network/src/rpc/push_batch_signature.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::{mux, proto::gossip as proto}; use anyhow::Context as _; -use zksync_consensus_roles::attester::{self, L1Batch}; +use zksync_consensus_roles::attester::{self, Batch}; use zksync_protobuf::ProtoFmt; /// Signature RPC. @@ -19,10 +19,10 @@ impl super::Rpc for Rpc { /// Signed consensus message that the receiving peer should process. 
#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct Req(pub(crate) Vec>>); +pub(crate) struct Req(pub(crate) Vec>>); impl ProtoFmt for Req { - type Proto = proto::PushSignature; + type Proto = proto::PushBatchSignature; fn read(r: &Self::Proto) -> anyhow::Result { let mut signatures = vec![]; diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index c0e3c0ae..5e08dbbb 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -77,31 +77,27 @@ pub fn new_configs( setup: &validator::testonly::Setup, gossip_peers: usize, ) -> Vec { - let configs = setup - .validator_keys - .iter() - .zip(setup.attester_keys.iter()) - .map(|(validator_key, attester_key)| { - let addr = net::tcp::testonly::reserve_listener(); - Config { - server_addr: addr, - public_addr: (*addr).into(), - // Pings are disabled in tests by default to avoid dropping connections - // due to timeouts. - ping_timeout: None, - validator_key: Some(validator_key.clone()), - gossip: GossipConfig { - key: rng.gen(), - dynamic_inbound_limit: usize::MAX, - static_inbound: HashSet::default(), - static_outbound: HashMap::default(), - }, - max_block_size: usize::MAX, - tcp_accept_rate: limiter::Rate::INF, - rpc: RpcConfig::default(), - max_block_queue_size: 10, - } - }); + let configs = setup.validator_keys.iter().map(|validator_key| { + let addr = net::tcp::testonly::reserve_listener(); + Config { + server_addr: addr, + public_addr: (*addr).into(), + // Pings are disabled in tests by default to avoid dropping connections + // due to timeouts. 
+ ping_timeout: None, + validator_key: Some(validator_key.clone()), + gossip: GossipConfig { + key: rng.gen(), + dynamic_inbound_limit: usize::MAX, + static_inbound: HashSet::default(), + static_outbound: HashMap::default(), + }, + max_block_size: usize::MAX, + tcp_accept_rate: limiter::Rate::INF, + rpc: RpcConfig::default(), + max_block_queue_size: 10, + } + }); let mut cfgs: Vec<_> = configs.collect(); let n = cfgs.len(); diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index 90b9065c..2b98511d 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -5,12 +5,12 @@ use zksync_consensus_utils::enum_util::Variant; use zksync_protobuf::{read_required, required, ProtoFmt}; use super::{ - AggregateSignature, BatchNumber, L1Batch, L1BatchQC, Msg, MsgHash, PublicKey, Signature, - SignedBatchMsg, Signers, WeightedAttester, + AggregateSignature, Batch, BatchNumber, BatchQC, Msg, MsgHash, PublicKey, Signature, Signed, + Signers, WeightedAttester, }; -impl ProtoFmt for L1Batch { - type Proto = proto::L1Batch; +impl ProtoFmt for Batch { + type Proto = proto::Batch; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { number: BatchNumber(*required(&r.number).context("number")?), @@ -25,7 +25,7 @@ impl ProtoFmt for L1Batch { } } -impl + Clone> ProtoFmt for SignedBatchMsg { +impl + Clone> ProtoFmt for Signed { type Proto = proto::Signed; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { @@ -49,7 +49,7 @@ impl ProtoFmt for Msg { fn read(r: &Self::Proto) -> anyhow::Result { use proto::msg::T; Ok(match r.t.as_ref().context("missing")? 
{ - T::L1Batch(r) => Self::L1Batch(ProtoFmt::read(r).context("L1Batch")?), + T::Batch(r) => Self::Batch(ProtoFmt::read(r).context("Batch")?), }) } @@ -57,7 +57,7 @@ impl ProtoFmt for Msg { use proto::msg::T; let t = match self { - Self::L1Batch(x) => T::L1Batch(x.build()), + Self::Batch(x) => T::Batch(x.build()), }; Self::Proto { t: Some(t) } @@ -145,8 +145,8 @@ impl ProtoFmt for MsgHash { } } -impl ProtoFmt for L1BatchQC { - type Proto = proto::L1BatchQc; +impl ProtoFmt for BatchQC { + type Proto = proto::BatchQc; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { diff --git a/node/libs/roles/src/attester/keys/aggregate_signature.rs b/node/libs/roles/src/attester/keys/aggregate_signature.rs index 20eb72ab..95ab6e0e 100644 --- a/node/libs/roles/src/attester/keys/aggregate_signature.rs +++ b/node/libs/roles/src/attester/keys/aggregate_signature.rs @@ -1,4 +1,4 @@ -use crate::attester::{L1Batch, MsgHash}; +use crate::attester::{Batch, MsgHash}; use super::{PublicKey, Signature}; use std::fmt; @@ -18,7 +18,7 @@ impl AggregateSignature { /// Verify a list of messages against a list of public keys. pub(crate) fn verify_messages<'a>( &self, - messages_and_keys: impl Iterator, + messages_and_keys: impl Iterator, ) -> anyhow::Result<()> { let hashes_and_keys = messages_and_keys.map(|(message, key)| (message.insert().hash(), key)); diff --git a/node/libs/roles/src/attester/keys/secret_key.rs b/node/libs/roles/src/attester/keys/secret_key.rs index b225521b..eb911fbb 100644 --- a/node/libs/roles/src/attester/keys/secret_key.rs +++ b/node/libs/roles/src/attester/keys/secret_key.rs @@ -1,5 +1,5 @@ use super::{PublicKey, Signature}; -use crate::attester::{L1Batch, Msg, MsgHash, SignedBatchMsg}; +use crate::attester::{Batch, Msg, MsgHash, Signed}; use std::{fmt, sync::Arc}; use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::Variant; @@ -22,12 +22,12 @@ impl SecretKey { } /// Signs a batch message. 
- pub fn sign_batch_msg(&self, msg: L1Batch) -> SignedBatchMsg + pub fn sign_msg(&self, msg: Batch) -> Signed where V: Variant, { let msg = msg.insert(); - SignedBatchMsg { + Signed { sig: self.sign_hash(&msg.hash()), key: self.public(), msg: V::extract(msg).unwrap(), diff --git a/node/libs/roles/src/attester/messages/l1_batch.rs b/node/libs/roles/src/attester/messages/batch.rs similarity index 83% rename from node/libs/roles/src/attester/messages/l1_batch.rs rename to node/libs/roles/src/attester/messages/batch.rs index d64f3bd4..16b6cec8 100644 --- a/node/libs/roles/src/attester/messages/l1_batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -5,7 +5,7 @@ use crate::{ validator::Genesis, }; -use super::{SignedBatchMsg, Signers}; +use super::{Signed, Signers}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, PartialOrd)] /// A batch number. @@ -21,7 +21,7 @@ impl BatchNumber { /// A message to send by attesters to the gossip network. /// It contains the attester signature to sign the block batches to be sent to L1. #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct L1Batch { +pub struct Batch { /// The number of the batch. pub number: BatchNumber, /// Time at which this message has been signed. @@ -32,18 +32,18 @@ pub struct L1Batch { /// A certificate for a batch of L2 blocks to be sent to L1. /// It contains the signatures of the attesters that signed the batch. #[derive(Debug, Clone, Eq, PartialEq)] -pub struct L1BatchQC { +pub struct BatchQC { /// The aggregate signature of the signed L1 batches. pub signature: AggregateSignature, /// The attesters that signed this message. pub signers: Signers, /// The message that was signed. - pub message: L1Batch, + pub message: Batch, } -/// Error returned by `L1BatchQC::verify()` if the signature is invalid. +/// Error returned by `BatchQC::verify()` if the signature is invalid. #[derive(thiserror::Error, Debug)] -pub enum L1BatchQCVerifyError { +pub enum BatchQCVerifyError { /// Bad signature. 
#[error("bad signature: {0:#}")] BadSignature(#[source] anyhow::Error), @@ -60,16 +60,16 @@ pub enum L1BatchQCVerifyError { BadSignersSet, } -impl L1Batch { +impl Batch { /// Checks if `self` is a newer version than `b`. pub fn is_newer(&self, b: &Self) -> bool { (&self.number, self.timestamp) > (&b.number, b.timestamp) } } -impl L1BatchQC { - /// Create a new empty instance for a given `L1Batch` message. - pub fn new(message: L1Batch, genesis: &Genesis) -> Self { +impl BatchQC { + /// Create a new empty instance for a given `Batch` message. + pub fn new(message: Batch, genesis: &Genesis) -> Self { Self { message, signers: Signers::new(genesis.attesters.len()), @@ -79,7 +79,7 @@ impl L1BatchQC { /// Add a attester's signature. /// Signature is assumed to be already verified. - pub fn add(&mut self, msg: &SignedBatchMsg, genesis: &Genesis) { + pub fn add(&mut self, msg: &Signed, genesis: &Genesis) { if self.message != msg.msg { return; }; @@ -93,9 +93,9 @@ impl L1BatchQC { self.signature.add(&msg.sig); } - /// Verifies the signature of the L1BatchQC. - pub fn verify(&self, genesis: &Genesis) -> Result<(), L1BatchQCVerifyError> { - use L1BatchQCVerifyError as Error; + /// Verifies the signature of the BatchQC. + pub fn verify(&self, genesis: &Genesis) -> Result<(), BatchQCVerifyError> { + use BatchQCVerifyError as Error; if self.signers.len() != genesis.attesters.len() { return Err(Error::BadSignersSet); } diff --git a/node/libs/roles/src/attester/messages/mod.rs b/node/libs/roles/src/attester/messages/mod.rs index d8a04914..bd68233d 100644 --- a/node/libs/roles/src/attester/messages/mod.rs +++ b/node/libs/roles/src/attester/messages/mod.rs @@ -1,6 +1,6 @@ //! Attester messages. 
-mod l1_batch; +mod batch; mod msg; -pub use l1_batch::*; +pub use batch::*; pub use msg::*; diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index d729e69c..20faee73 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, fmt}; use crate::{ - attester::{L1Batch, PublicKey, Signature}, + attester::{Batch, PublicKey, Signature}, validator::ViewNumber, }; use anyhow::Context as _; @@ -13,7 +13,7 @@ use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Msg { /// L1 batch message. - L1Batch(L1Batch), + Batch(Batch), } impl Msg { @@ -23,12 +23,12 @@ impl Msg { } } -impl Variant for L1Batch { +impl Variant for Batch { fn insert(self) -> Msg { - Msg::L1Batch(self) + Msg::Batch(self) } fn extract(msg: Msg) -> Result { - let Msg::L1Batch(this) = msg; + let Msg::Batch(this) = msg; Ok(this) } } @@ -36,7 +36,7 @@ impl Variant for L1Batch { /// Strongly typed signed l1 batch message. /// WARNING: signature is not guaranteed to be valid. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct SignedBatchMsg> { +pub struct Signed> { /// The message that was signed. pub msg: V, /// The public key of the signer. @@ -46,7 +46,7 @@ pub struct SignedBatchMsg> { } /// Struct that represents a bit map of attesters. We use it to compactly store -/// which attesters signed a given L1Batch message. +/// which attesters signed a given Batch message. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Signers(pub BitVec); @@ -252,7 +252,7 @@ impl fmt::Debug for MsgHash { } } -impl + Clone> SignedBatchMsg { +impl + Clone> Signed { /// Verify the signature on the message. 
pub fn verify(&self) -> anyhow::Result<()> { self.sig.verify_msg(&self.msg.clone().insert(), &self.key) @@ -260,8 +260,8 @@ impl + Clone> SignedBatchMsg { /// Casts a signed message variant to sub/super variant. /// It is an equivalent of constructing/deconstructing enum values. - pub fn cast(self) -> Result, BadVariantError> { - Ok(SignedBatchMsg { + pub fn cast(self) -> Result, BadVariantError> { + Ok(Signed { msg: V::extract(self.msg.insert())?, key: self.key, sig: self.sig, diff --git a/node/libs/roles/src/attester/testonly.rs b/node/libs/roles/src/attester/testonly.rs index 4a6e1859..b213c0f5 100644 --- a/node/libs/roles/src/attester/testonly.rs +++ b/node/libs/roles/src/attester/testonly.rs @@ -1,6 +1,6 @@ use super::{ - AggregateSignature, BatchNumber, Committee, L1Batch, L1BatchQC, Msg, MsgHash, PublicKey, - SecretKey, Signature, SignedBatchMsg, Signers, WeightedAttester, + AggregateSignature, Batch, BatchNumber, BatchQC, Committee, Msg, MsgHash, PublicKey, SecretKey, + Signature, Signed, Signers, WeightedAttester, }; use bit_vec::BitVec; use rand::{ @@ -51,18 +51,18 @@ impl Distribution for Standard { } } -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> L1Batch { - L1Batch { +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Batch { + Batch { number: BatchNumber(rng.gen()), timestamp: time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), } } } -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> L1BatchQC { - L1BatchQC { +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BatchQC { + BatchQC { message: rng.gen(), signers: rng.gen(), signature: rng.gen(), @@ -72,7 +72,7 @@ impl Distribution for Standard { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> Msg { - Msg::L1Batch(rng.gen()) + Msg::Batch(rng.gen()) } } @@ -88,9 +88,9 @@ impl Distribution for Standard { } } -impl> Distribution> for Standard { - fn sample(&self, rng: &mut R) -> SignedBatchMsg 
{ - rng.gen::().sign_batch_msg(rng.gen()) +impl> Distribution> for Standard { + fn sample(&self, rng: &mut R) -> Signed { + rng.gen::().sign_msg(rng.gen()) } } diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index 9b451076..1e01e288 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -69,8 +69,8 @@ fn test_text_encoding() { fn test_schema_encoding() { let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - test_encode_random::>(rng); - test_encode_random::(rng); + test_encode_random::>(rng); + test_encode_random::(rng); test_encode_random::(rng); test_encode_random::(rng); test_encode_random::(rng); @@ -134,16 +134,16 @@ fn test_agg_signature_verify() { .is_err()); } -fn make_l1_batch_msg(rng: &mut impl Rng) -> L1Batch { - L1Batch { +fn make_batch_msg(rng: &mut impl Rng) -> Batch { + Batch { number: BatchNumber(rng.gen()), timestamp: time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), } } #[test] -fn test_l1_batch_qc() { - use L1BatchQCVerifyError as Error; +fn test_batch_qc() { + use BatchQCVerifyError as Error; let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); @@ -156,9 +156,9 @@ fn test_l1_batch_qc() { let attester_weight = setup1.genesis.attesters.total_weight() / 6; for i in 0..setup1.attester_keys.len() + 1 { - let mut qc = L1BatchQC::new(make_l1_batch_msg(rng), &setup1.genesis); + let mut qc = BatchQC::new(make_batch_msg(rng), &setup1.genesis); for key in &setup1.attester_keys[0..i] { - qc.add(&key.sign_batch_msg(qc.message.clone()), &setup1.genesis); + qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis); } let expected_weight = i as u64 * attester_weight; if expected_weight >= setup1.genesis.attesters.threshold() { @@ -186,11 +186,11 @@ fn test_attester_committee_weights() { // Expected sum of the attesters weights let sums = [1000, 1600, 2400, 8400, 9300, 10000]; - let msg = make_l1_batch_msg(rng); - let 
mut qc = L1BatchQC::new(msg.clone(), &setup.genesis); + let msg = make_batch_msg(rng); + let mut qc = BatchQC::new(msg.clone(), &setup.genesis); for (n, weight) in sums.iter().enumerate() { let key = &setup.attester_keys[n]; - qc.add(&key.sign_batch_msg(msg.clone()), &setup.genesis); + qc.add(&key.sign_msg(msg.clone()), &setup.genesis); assert_eq!(setup.genesis.attesters.weight(&qc.signers), *weight); } } diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto index 2b3f814c..c8c51361 100644 --- a/node/libs/roles/src/proto/attester.proto +++ b/node/libs/roles/src/proto/attester.proto @@ -4,21 +4,21 @@ package zksync.roles.attester; import "zksync/std.proto"; -message L1Batch { +message Batch { optional uint64 number = 1; // required optional std.Timestamp timestamp = 2; // required // TODO: add the hash of the L1 batch as a field } -message L1BatchQC { - optional L1Batch msg = 1; // required +message BatchQC { + optional Batch msg = 1; // required optional std.BitVector signers = 2; // required optional AggregateSignature sig = 3; // required } message Msg { oneof t { // required - L1Batch l1_batch = 4; + Batch batch = 4; } } diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 1c72855d..2eb8e44d 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,5 +1,5 @@ //! Test-only utilities. -use crate::attester::{self, L1Batch, SignedBatchMsg, WeightedAttester}; +use crate::attester; use super::{ AggregateSignature, BlockHeader, BlockNumber, ChainId, CommitQC, Committee, ConsensusMsg, @@ -135,10 +135,10 @@ impl Setup { } /// Pushes a new L1 batch. 
- pub fn push_batch(&mut self, batch: L1Batch) { + pub fn push_batch(&mut self, batch: attester::Batch) { for key in &self.0.attester_keys { - let signed = key.sign_batch_msg(batch.clone()); - self.0.signed_l1_batches.push(signed); + let signed = key.sign_msg(batch.clone()); + self.0.signed_batches.push(signed); } } } @@ -152,26 +152,26 @@ impl From for Setup { first_block: spec.first_block, protocol_version: spec.protocol_version, - validators: Committee::new(spec.validator_weights.iter().map( - |(k, w)| WeightedValidator { + validators: Committee::new(spec.validator_weights.iter().map(|(k, w)| { + WeightedValidator { key: k.public(), weight: *w, - }, - )) + } + })) .unwrap(), - attesters: attester::Committee::new(spec.attester_weights.iter().map( - |(k, w)| WeightedAttester { + attesters: attester::Committee::new(spec.attester_weights.iter().map(|(k, w)| { + attester::WeightedAttester { key: k.public(), weight: *w, - }, - )) + } + })) .unwrap(), leader_selection: spec.leader_selection, } .with_hash(), validator_keys: spec.validator_weights.into_iter().map(|(k, _)| k).collect(), attester_keys: spec.attester_weights.into_iter().map(|(k, _)| k).collect(), - signed_l1_batches: vec![], + signed_batches: vec![], blocks: vec![], }) } @@ -187,7 +187,7 @@ pub struct SetupInner { /// Past blocks. pub blocks: Vec, /// L1 batches - pub signed_l1_batches: Vec>, + pub signed_batches: Vec>, /// Genesis config. 
pub genesis: Genesis, } From 0271e43c9f369a7089a4baf06dbd8331f2734800 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 7 May 2024 17:53:39 -0300 Subject: [PATCH 47/79] Fix some function names --- node/actors/network/src/gossip/mod.rs | 2 +- node/libs/roles/src/attester/messages/batch.rs | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index ba509aab..efc8a852 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -141,7 +141,7 @@ impl Network { let weight = self.genesis().attesters.weight( &self .batch_qc - .get(&last_qc.message.number.next_batch_number()) + .get(&last_qc.message.number.next()) .context("last qc")? .signers, ); diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index 16b6cec8..d0a83826 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -1,9 +1,6 @@ use zksync_concurrency::time; -use crate::{ - attester::{self, AggregateSignature}, - validator::Genesis, -}; +use crate::{attester, validator::Genesis}; use super::{Signed, Signers}; @@ -13,7 +10,7 @@ pub struct BatchNumber(pub u64); impl BatchNumber { /// Increment the batch number. - pub fn next_batch_number(&self) -> BatchNumber { + pub fn next(&self) -> BatchNumber { BatchNumber(self.0.checked_add(1).unwrap_or(0)) } } @@ -34,7 +31,7 @@ pub struct Batch { #[derive(Debug, Clone, Eq, PartialEq)] pub struct BatchQC { /// The aggregate signature of the signed L1 batches. - pub signature: AggregateSignature, + pub signature: attester::AggregateSignature, /// The attesters that signed this message. pub signers: Signers, /// The message that was signed. 
From ecb3c99136deb2df4e16067f240f4de69872b37d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 11:10:10 -0300 Subject: [PATCH 48/79] Implement hash for every curve signature and derive it for each role --- node/actors/network/src/rpc/tests.rs | 1 + node/libs/crypto/src/bls12_381/mod.rs | 6 ++++++ node/libs/crypto/src/bn254/mod.rs | 11 ++++++++--- node/libs/crypto/src/ed25519/mod.rs | 6 ++++++ node/libs/roles/src/attester/keys/signature.rs | 8 +------- node/libs/roles/src/node/keys.rs | 2 +- node/libs/roles/src/validator/keys/signature.rs | 8 +------- 7 files changed, 24 insertions(+), 18 deletions(-) diff --git a/node/actors/network/src/rpc/tests.rs b/node/actors/network/src/rpc/tests.rs index b3490406..f7cb7e72 100644 --- a/node/actors/network/src/rpc/tests.rs +++ b/node/actors/network/src/rpc/tests.rs @@ -17,6 +17,7 @@ fn test_capability_rpc_correspondence() { push_block_store_state::Rpc::CAPABILITY_ID, get_block::Rpc::CAPABILITY_ID, ping::Rpc::CAPABILITY_ID, + push_batch_signature::Rpc::CAPABILITY_ID, ]; assert_eq!(ids.len(), HashSet::from(ids).len()); } diff --git a/node/libs/crypto/src/bls12_381/mod.rs b/node/libs/crypto/src/bls12_381/mod.rs index 248dfc99..3ae37d17 100644 --- a/node/libs/crypto/src/bls12_381/mod.rs +++ b/node/libs/crypto/src/bls12_381/mod.rs @@ -168,6 +168,12 @@ impl Ord for Signature { } } +impl std::hash::Hash for Signature { + fn hash(&self, state: &mut H) { + ByteFmt::encode(self).hash(state) + } +} + /// Type safety wrapper around a `blst` aggregate signature #[derive(Clone, Debug)] pub struct AggregateSignature(bls::AggregateSignature); diff --git a/node/libs/crypto/src/bn254/mod.rs b/node/libs/crypto/src/bn254/mod.rs index b8d45ac6..730313ce 100644 --- a/node/libs/crypto/src/bn254/mod.rs +++ b/node/libs/crypto/src/bn254/mod.rs @@ -15,7 +15,6 @@ use pairing::{ use std::{ collections::HashMap, fmt::{Debug, Formatter}, - hash::{Hash, Hasher}, io::Cursor, }; @@ -119,8 +118,8 @@ impl PublicKey { } } -impl Hash for PublicKey { 
- fn hash(&self, state: &mut H) { +impl std::hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { state.write(&self.encode()); } } @@ -199,6 +198,12 @@ impl ByteFmt for Signature { } } +impl std::hash::Hash for Signature { + fn hash(&self, state: &mut H) { + ByteFmt::encode(self).hash(state) + } +} + impl PartialOrd for Signature { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) diff --git a/node/libs/crypto/src/ed25519/mod.rs b/node/libs/crypto/src/ed25519/mod.rs index 284869fc..4122ec04 100644 --- a/node/libs/crypto/src/ed25519/mod.rs +++ b/node/libs/crypto/src/ed25519/mod.rs @@ -123,6 +123,12 @@ impl ByteFmt for Signature { } } +impl std::hash::Hash for Signature { + fn hash(&self, state: &mut H) { + ByteFmt::encode(self).hash(state) + } +} + /// Error returned when an invalid signature is detected. #[derive(Debug, thiserror::Error)] #[error("invalid signature")] diff --git a/node/libs/roles/src/attester/keys/signature.rs b/node/libs/roles/src/attester/keys/signature.rs index f4d5f3cb..22583ddf 100644 --- a/node/libs/roles/src/attester/keys/signature.rs +++ b/node/libs/roles/src/attester/keys/signature.rs @@ -5,7 +5,7 @@ use std::fmt; use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; /// A signature of an L1 batch from an attester. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Hash)] pub struct Signature(pub(crate) bn254::Signature); impl Signature { @@ -48,9 +48,3 @@ impl fmt::Debug for Signature { fmt.write_str(&TextFmt::encode(self)) } } - -impl std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - ByteFmt::encode(self).hash(state) - } -} diff --git a/node/libs/roles/src/node/keys.rs b/node/libs/roles/src/node/keys.rs index b3fb0441..107ff538 100644 --- a/node/libs/roles/src/node/keys.rs +++ b/node/libs/roles/src/node/keys.rs @@ -99,7 +99,7 @@ impl fmt::Debug for PublicKey { } /// A signature of a message. 
-#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Signature(pub(super) ed25519::Signature); impl ByteFmt for Signature { diff --git a/node/libs/roles/src/validator/keys/signature.rs b/node/libs/roles/src/validator/keys/signature.rs index deb89eea..967ae149 100644 --- a/node/libs/roles/src/validator/keys/signature.rs +++ b/node/libs/roles/src/validator/keys/signature.rs @@ -4,7 +4,7 @@ use std::fmt; use zksync_consensus_crypto::{bls12_381, ByteFmt, Text, TextFmt}; /// A signature from a validator. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Hash)] pub struct Signature(pub(crate) bls12_381::Signature); impl Signature { @@ -47,9 +47,3 @@ impl fmt::Debug for Signature { fmt.write_str(&TextFmt::encode(self)) } } - -impl std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - ByteFmt::encode(self).hash(state) - } -} From 2201c54af64ddb65b16146a3c90bdc3fef4ec8cb Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 13:04:20 -0300 Subject: [PATCH 49/79] Fix incorrect docs for attesters --- node/actors/executor/src/lib.rs | 7 ---- node/actors/network/src/gossip/runner.rs | 8 ++--- .../network/src/rpc/push_batch_signature.rs | 2 +- .../libs/roles/src/attester/messages/batch.rs | 6 ++-- node/libs/roles/src/attester/messages/msg.rs | 32 +++++++------------ 5 files changed, 20 insertions(+), 35 deletions(-) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 04e16a4b..2a50d54c 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -28,13 +28,6 @@ pub struct Validator { pub payload_manager: Box, } -/// Validator-related part of [`Executor`]. -#[derive(Debug)] -pub struct Attester { - /// Consensus network configuration. - pub key: attester::SecretKey, -} - /// Config of the node executor. 
#[derive(Clone, Debug)] pub struct Config { diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 6508bd12..f6e994f5 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -32,11 +32,11 @@ impl rpc::Handler for PushValidatorAddrsServer<' } } -struct BatchServer<'a>(&'a Network); +struct PushBatchSignatureServer<'a>(&'a Network); #[async_trait::async_trait] -impl rpc::Handler for BatchServer<'_> { - /// Here we bound the buffering of incoming consensus messages. +impl rpc::Handler for PushBatchSignatureServer<'_> { + /// Here we bound the buffering of incoming batch messages. fn max_req_size(&self) -> usize { 100 * kB } @@ -118,7 +118,7 @@ impl Network { ctx, self.cfg.rpc.push_batch_signature_rate, ); - let push_signature_server = BatchServer(self); + let push_signature_server = PushBatchSignatureServer(self); let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); diff --git a/node/actors/network/src/rpc/push_batch_signature.rs b/node/actors/network/src/rpc/push_batch_signature.rs index 31be379e..b9d3c62e 100644 --- a/node/actors/network/src/rpc/push_batch_signature.rs +++ b/node/actors/network/src/rpc/push_batch_signature.rs @@ -17,7 +17,7 @@ impl super::Rpc for Rpc { type Resp = (); } -/// Signed consensus message that the receiving peer should process. +/// Signed batch message that the receiving peer should process. #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct Req(pub(crate) Vec>>); diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index d0a83826..6576e60d 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -15,8 +15,8 @@ impl BatchNumber { } } -/// A message to send by attesters to the gossip network. 
-/// It contains the attester signature to sign the block batches to be sent to L1. +/// A message containing information about a batch of blocks. +/// It is signed by the attesters and then propagated through the gossip network. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Batch { /// The number of the batch. @@ -96,7 +96,7 @@ impl BatchQC { if self.signers.len() != genesis.attesters.len() { return Err(Error::BadSignersSet); } - // Verify the signers' weight is enough. + // Verify that the signer's weight is sufficient. let weight = genesis.attesters.weight(&self.signers); let threshold = genesis.attesters.threshold(); if weight < threshold { diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index 20faee73..42d2722b 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -1,9 +1,6 @@ use std::{collections::BTreeMap, fmt}; -use crate::{ - attester::{Batch, PublicKey, Signature}, - validator::ViewNumber, -}; +use crate::attester; use anyhow::Context as _; use bit_vec::BitVec; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; @@ -13,7 +10,7 @@ use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Msg { /// L1 batch message. - Batch(Batch), + Batch(attester::Batch), } impl Msg { @@ -23,7 +20,7 @@ impl Msg { } } -impl Variant for Batch { +impl Variant for attester::Batch { fn insert(self) -> Msg { Msg::Batch(self) } @@ -40,9 +37,9 @@ pub struct Signed> { /// The message that was signed. pub msg: V, /// The public key of the signer. - pub key: PublicKey, + pub key: attester::PublicKey, /// The signature. - pub sig: Signature, + pub sig: attester::Signature, } /// Struct that represents a bit map of attesters. 
We use it to compactly store @@ -78,7 +75,7 @@ impl Signers { #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct Committee { vec: Vec, - indexes: BTreeMap, + indexes: BTreeMap, total_weight: u64, } @@ -122,7 +119,7 @@ impl Committee { } /// Iterates over attester keys. - pub fn iter_keys(&self) -> impl Iterator { + pub fn iter_keys(&self) -> impl Iterator { self.vec.iter().map(|v| &v.key) } @@ -133,7 +130,7 @@ impl Committee { } /// Returns true if the given attester is in the attester committee. - pub fn contains(&self, attester: &PublicKey) -> bool { + pub fn contains(&self, attester: &attester::PublicKey) -> bool { self.indexes.contains_key(attester) } @@ -143,16 +140,10 @@ impl Committee { } /// Get the index of a attester in the committee. - pub fn index(&self, attester: &PublicKey) -> Option { + pub fn index(&self, attester: &attester::PublicKey) -> Option { self.indexes.get(attester).copied() } - /// Computes the leader for the given view. - pub fn view_leader(&self, view_number: ViewNumber) -> PublicKey { - let index = view_number.0 as usize % self.len(); - self.get(index).unwrap().key.clone() - } - /// Signature weight threshold for this attester committee. pub fn threshold(&self) -> u64 { threshold(self.total_weight()) @@ -180,8 +171,9 @@ impl Committee { } } -/// Calculate the consensus threshold, the minimum votes' weight for any consensus action to be valid, +/// Calculate the attester threshold, that is the minimum votes weight for any attesters action to be valid, /// for a given committee total weight. +/// Technically we need just n > f+1, but for now we use a threshold consistent with the validator committee. pub fn threshold(total_weight: u64) -> u64 { total_weight - max_faulty_weight(total_weight) } @@ -273,7 +265,7 @@ impl + Clone> Signed { #[derive(Debug, Clone, PartialEq, Eq)] pub struct WeightedAttester { /// Attester key - pub key: PublicKey, + pub key: attester::PublicKey, /// Attester weight inside the Committee. 
pub weight: u64, } From 99c93027dfe30dcdccdd391042dc85b10bede244 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 14:23:53 -0300 Subject: [PATCH 50/79] Update encoding tests for secret keys --- node/libs/roles/src/attester/tests.rs | 21 ++++++------------- node/libs/roles/src/node/tests.rs | 12 ++--------- .../roles/src/validator/messages/consensus.rs | 3 +-- node/libs/roles/src/validator/tests.rs | 12 ++--------- 4 files changed, 11 insertions(+), 37 deletions(-) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index 1e01e288..d2841d48 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -1,4 +1,4 @@ -use crate::{attester, validator::testonly::Setup}; +use crate::validator::testonly::Setup; use super::*; use assert_matches::assert_matches; @@ -13,12 +13,7 @@ fn test_byte_encoding() { let rng = &mut ctx.rng(); let sk: SecretKey = rng.gen(); - assert_eq!( - sk.public(), - ::decode(&ByteFmt::encode(&sk)) - .unwrap() - .public() - ); + assert_eq!(sk, ByteFmt::decode(&ByteFmt::encode(&sk)).unwrap()); let pk: PublicKey = rng.gen(); assert_eq!(pk, ByteFmt::decode(&ByteFmt::encode(&pk)).unwrap()); @@ -40,10 +35,7 @@ fn test_text_encoding() { let sk: SecretKey = rng.gen(); let t = TextFmt::encode(&sk); - assert_eq!( - sk.public(), - Text::new(&t).decode::().unwrap().public() - ); + assert_eq!(sk, Text::new(&t).decode::().unwrap()); let pk: PublicKey = rng.gen(); let t = TextFmt::encode(&pk); @@ -150,8 +142,7 @@ fn test_batch_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.attesters = - attester::Committee::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(); + genesis3.attesters = Committee::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(); let genesis3 = genesis3.with_hash(); let attester_weight = setup1.genesis.attesters.total_weight() / 6; @@ -209,7 +200,7 @@ fn 
test_committee_weights_overflow_check() { .collect(); // Creation should overflow - assert_matches!(attester::Committee::new(attesters), Err(_)); + assert_matches!(Committee::new(attesters), Err(_)); } #[test] @@ -226,5 +217,5 @@ fn test_committee_with_zero_weights() { .collect(); // Committee creation should error on zero weight attesters - assert_matches!(attester::Committee::new(attesters), Err(_)); + assert_matches!(Committee::new(attesters), Err(_)); } diff --git a/node/libs/roles/src/node/tests.rs b/node/libs/roles/src/node/tests.rs index 493d4ce8..9814b16f 100644 --- a/node/libs/roles/src/node/tests.rs +++ b/node/libs/roles/src/node/tests.rs @@ -7,12 +7,7 @@ use zksync_protobuf::testonly::{test_encode, test_encode_random}; #[test] fn test_byte_encoding() { let key = SecretKey::generate(); - assert_eq!( - key.public(), - ::decode(&ByteFmt::encode(&key)) - .unwrap() - .public() - ); + assert_eq!(key, ByteFmt::decode(&ByteFmt::encode(&key)).unwrap()); assert_eq!( key.public(), ByteFmt::decode(&ByteFmt::encode(&key.public())).unwrap() @@ -24,10 +19,7 @@ fn test_text_encoding() { let key = SecretKey::generate(); let t1 = TextFmt::encode(&key); let t2 = TextFmt::encode(&key.public()); - assert_eq!( - key.public(), - Text::new(&t1).decode::().unwrap().public() - ); + assert_eq!(key, Text::new(&t1).decode::().unwrap()); assert_eq!(key.public(), Text::new(&t2).decode().unwrap()); assert!(Text::new(&t1).decode::().is_err()); assert!(Text::new(&t2).decode::().is_err()); diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index df2f3475..bbae90de 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -317,8 +317,7 @@ impl Genesis { /// Computes the leader for the given view. 
pub fn view_leader(&self, view: ViewNumber) -> validator::PublicKey { - self.validators - .view_leader(view, &self.leader_selection) + self.validators.view_leader(view, &self.leader_selection) } /// Hash of the genesis. diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index 57f22614..b0bc1d28 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -13,12 +13,7 @@ fn test_byte_encoding() { let rng = &mut ctx.rng(); let sk: SecretKey = rng.gen(); - assert_eq!( - sk.public(), - ::decode(&ByteFmt::encode(&sk)) - .unwrap() - .public() - ); + assert_eq!(sk, ByteFmt::decode(&ByteFmt::encode(&sk)).unwrap()); let pk: PublicKey = rng.gen(); assert_eq!(pk, ByteFmt::decode(&ByteFmt::encode(&pk)).unwrap()); @@ -52,10 +47,7 @@ fn test_text_encoding() { let sk: SecretKey = rng.gen(); let t = TextFmt::encode(&sk); - assert_eq!( - sk.public(), - Text::new(&t).decode::().unwrap().public() - ); + assert_eq!(sk, Text::new(&t).decode::().unwrap()); let pk: PublicKey = rng.gen(); let t = TextFmt::encode(&pk); From e09b6f6705e08c4122d194ed582e92269c2d9c0d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 14:24:27 -0300 Subject: [PATCH 51/79] Make batch qc adding function return proper errors --- .../libs/roles/src/attester/messages/batch.rs | 39 ++++++++++++++----- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index 6576e60d..a3a68d92 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -3,6 +3,7 @@ use zksync_concurrency::time; use crate::{attester, validator::Genesis}; use super::{Signed, Signers}; +use anyhow::ensure; #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, PartialOrd)] /// A batch number. 
@@ -57,6 +58,23 @@ pub enum BatchQCVerifyError { BadSignersSet, } +/// Error returned by `BatchQC::add()` if the signed message cannot be added to the QC. +#[derive(thiserror::Error, Debug)] +pub enum BatchQCAddError { + /// Inconsistent messages. + #[error("Trying to add signature for a different message")] + InconsistentMessages, + /// Signer not present in the committee. + #[error("Signer not in committee: {signer:?}")] + SignerNotInCommittee { + /// Signer of the message. + signer: Box, + }, + /// Message already present in BatchQC. + #[error("Message already signed for BatchQC")] + Exists, +} + impl Batch { /// Checks if `self` is a newer version than `b`. pub fn is_newer(&self, b: &Self) -> bool { @@ -76,18 +94,19 @@ impl BatchQC { /// Add a attester's signature. /// Signature is assumed to be already verified. - pub fn add(&mut self, msg: &Signed, genesis: &Genesis) { - if self.message != msg.msg { - return; - }; - let Some(i) = genesis.attesters.index(&msg.key) else { - return; - }; - if self.signers.0[i] { - return; - }; + pub fn add(&mut self, msg: &Signed, genesis: &Genesis) -> anyhow::Result<()> { + use BatchQCAddError as Error; + ensure!(self.message != msg.msg, Error::InconsistentMessages); + let i = genesis + .attesters + .index(&msg.key) + .ok_or(Error::SignerNotInCommittee { + signer: Box::new(msg.key.clone()), + })?; + ensure!(self.signers.0[i], Error::Exists); + self.signers.0.set(i, true); + self.signature.add(&msg.sig); + Ok(()) } /// Verifies the signature of the BatchQC. 
From 76a0a9b6ae30f156959cbd022725095eba352334 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 16:09:25 -0300 Subject: [PATCH 52/79] Remove timestamp for batch message --- .../network/src/gossip/batch_signatures.rs | 4 +-- node/actors/network/src/gossip/tests/mod.rs | 32 +++---------------- node/libs/roles/src/attester/conv.rs | 2 -- .../libs/roles/src/attester/messages/batch.rs | 17 ++-------- node/libs/roles/src/attester/testonly.rs | 1 - node/libs/roles/src/attester/tests.rs | 3 +- node/libs/roles/src/proto/attester.proto | 1 - 7 files changed, 11 insertions(+), 49 deletions(-) diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_signatures.rs index 30475b5e..1e8a5535 100644 --- a/node/actors/network/src/gossip/batch_signatures.rs +++ b/node/actors/network/src/gossip/batch_signatures.rs @@ -17,7 +17,7 @@ impl BatchSignatures { let mut newer = vec![]; for (k, v) in &self.0 { if let Some(bv) = b.0.get(k) { - if !v.msg.is_newer(&bv.msg) { + if v.msg <= bv.msg { continue; } } @@ -54,7 +54,7 @@ impl BatchSignatures { continue; } if let Some(x) = self.0.get(&d.key) { - if !d.msg.is_newer(&x.msg) { + if d.msg <= x.msg { continue; } } diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 9bb25fef..8fdf1db0 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -105,9 +105,8 @@ fn mk_netaddr( fn mk_batch( key: &attester::SecretKey, number: attester::BatchNumber, - timestamp: time::Utc, ) -> attester::Signed { - key.sign_msg(attester::Batch { number, timestamp }) + key.sign_msg(attester::Batch { number }) } fn random_netaddr( @@ -127,7 +126,6 @@ fn random_batch_signature( key: &attester::SecretKey, ) -> Arc> { let batch = attester::Batch { - timestamp: mk_timestamp(rng), number: attester::BatchNumber(rng.gen_range(0..1000)), }; Arc::new(key.sign_msg(batch.to_owned())) @@ -153,10 +151,8 @@ fn 
update_signature( batch: &attester::Batch, key: &attester::SecretKey, batch_number_diff: i64, - timestamp_diff: time::Duration, ) -> Arc> { let batch = attester::Batch { - timestamp: batch.timestamp + timestamp_diff, number: attester::BatchNumber((batch.number.0 as i64 + batch_number_diff) as u64), }; Arc::new(key.sign_msg(batch.to_owned())) @@ -538,24 +534,12 @@ async fn test_batch_signatures() { signatures.update(&attesters, &want.as_vec()).await.unwrap(); assert_eq!(want.0, sub.borrow_and_update().0); - // Update values. - let delta = time::Duration::seconds(10); // newer batch number - let k0v2 = update_signature(rng, &want.get(&keys[0]).msg, &keys[0], 1, -delta); + let k0v2 = update_signature(rng, &want.get(&keys[0]).msg, &keys[0], 1); // same batch number, newer timestamp - let k1v2 = update_signature(rng, &want.get(&keys[1]).msg, &keys[1], 0, delta); - // same batch number, same timestamp - let k2v2 = update_signature( - rng, - &want.get(&keys[2]).msg, - &keys[2], - 0, - time::Duration::ZERO, - ); - // same batch number, older timestamp - let k3v2 = update_signature(rng, &want.get(&keys[3]).msg, &keys[3], 0, -delta); + let k1v2 = update_signature(rng, &want.get(&keys[1]).msg, &keys[1], 0); // older batch number - let k4v2 = update_signature(rng, &want.get(&keys[4]).msg, &keys[4], -1, delta); + let k4v2 = update_signature(rng, &want.get(&keys[4]).msg, &keys[4], -1); // first entry for a key in the config let k6v1 = random_batch_signature(rng, &keys[6]); // entry for a key outside of the config @@ -568,8 +552,6 @@ async fn test_batch_signatures() { let update = [ k0v2, k1v2, - k2v2, - k3v2, k4v2, // no new entry for keys[5] k6v1, @@ -580,11 +562,7 @@ async fn test_batch_signatures() { assert_eq!(want.0, sub.borrow_and_update().0); // Invalid signature. 
- let mut k0v3 = mk_batch( - &keys[1], - attester::BatchNumber(rng.gen_range(0..1000)), - time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), - ); + let mut k0v3 = mk_batch(&keys[1], attester::BatchNumber(rng.gen_range(0..1000))); k0v3.key = keys[0].public(); assert!(signatures .update(&attesters, &[Arc::new(k0v3)]) diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index 2b98511d..b636fcc9 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -14,13 +14,11 @@ impl ProtoFmt for Batch { fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { number: BatchNumber(*required(&r.number).context("number")?), - timestamp: read_required(&r.timestamp).context("timestamp")?, }) } fn build(&self) -> Self::Proto { Self::Proto { number: Some(self.number.0), - timestamp: Some(self.timestamp.build()), } } } diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index a3a68d92..2c772ea4 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -1,5 +1,3 @@ -use zksync_concurrency::time; - use crate::{attester, validator::Genesis}; use super::{Signed, Signers}; @@ -18,12 +16,10 @@ impl BatchNumber { /// A message containing information about a batch of blocks. /// It is signed by the attesters and then propagated through the gossip network. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd)] pub struct Batch { /// The number of the batch. pub number: BatchNumber, - /// Time at which this message has been signed. - pub timestamp: time::Utc, // TODO: add the hash of the L1 batch as a field } @@ -75,13 +71,6 @@ pub enum BatchQCAddError { Exists, } -impl Batch { - /// Checks if `self` is a newer version than `b`. 
- pub fn is_newer(&self, b: &Self) -> bool { - (&self.number, self.timestamp) > (&b.number, b.timestamp) - } -} - impl BatchQC { /// Create a new empty instance for a given `Batch` message. pub fn new(message: Batch, genesis: &Genesis) -> Self { @@ -96,14 +85,14 @@ impl BatchQC { /// Signature is assumed to be already verified. pub fn add(&mut self, msg: &Signed, genesis: &Genesis) -> anyhow::Result<()> { use BatchQCAddError as Error; - ensure!(self.message != msg.msg, Error::InconsistentMessages); + ensure!(self.message == msg.msg, Error::InconsistentMessages); let i = genesis .attesters .index(&msg.key) .ok_or(Error::SignerNotInCommittee { signer: Box::new(msg.key.clone()), })?; - ensure!(self.signers.0[i], Error::Exists); + ensure!(!self.signers.0[i], Error::Exists); self.signers.0.set(i, true); self.signature.add(&msg.sig); Ok(()) diff --git a/node/libs/roles/src/attester/testonly.rs b/node/libs/roles/src/attester/testonly.rs index b213c0f5..3096e093 100644 --- a/node/libs/roles/src/attester/testonly.rs +++ b/node/libs/roles/src/attester/testonly.rs @@ -55,7 +55,6 @@ impl Distribution for Standard { fn sample(&self, rng: &mut R) -> Batch { Batch { number: BatchNumber(rng.gen()), - timestamp: time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), } } } diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index d2841d48..b97cb9f2 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -129,7 +129,6 @@ fn test_agg_signature_verify() { fn make_batch_msg(rng: &mut impl Rng) -> Batch { Batch { number: BatchNumber(rng.gen()), - timestamp: time::UNIX_EPOCH + time::Duration::seconds(rng.gen_range(0..1000000000)), } } @@ -181,7 +180,7 @@ fn test_attester_committee_weights() { let mut qc = BatchQC::new(msg.clone(), &setup.genesis); for (n, weight) in sums.iter().enumerate() { let key = &setup.attester_keys[n]; - qc.add(&key.sign_msg(msg.clone()), &setup.genesis); + 
qc.add(&key.sign_msg(msg.clone()), &setup.genesis).unwrap(); assert_eq!(setup.genesis.attesters.weight(&qc.signers), *weight); } } diff --git a/node/libs/roles/src/proto/attester.proto b/node/libs/roles/src/proto/attester.proto index c8c51361..4c5e2c73 100644 --- a/node/libs/roles/src/proto/attester.proto +++ b/node/libs/roles/src/proto/attester.proto @@ -6,7 +6,6 @@ import "zksync/std.proto"; message Batch { optional uint64 number = 1; // required - optional std.Timestamp timestamp = 2; // required // TODO: add the hash of the L1 batch as a field } From d4acad5c8a93e438a78a1b23346c27ef90fc94f1 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 16:45:38 -0300 Subject: [PATCH 53/79] Update rpc method name for push batch votes --- node/actors/network/src/gossip/runner.rs | 16 ++++++---------- node/actors/network/src/proto/gossip.proto | 2 +- node/actors/network/src/rpc/mod.rs | 2 +- ...sh_batch_signature.rs => push_batch_votes.rs} | 2 +- node/actors/network/src/rpc/tests.rs | 2 +- 5 files changed, 10 insertions(+), 14 deletions(-) rename node/actors/network/src/rpc/{push_batch_signature.rs => push_batch_votes.rs} (96%) diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index f6e994f5..4395edfe 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -32,20 +32,16 @@ impl rpc::Handler for PushValidatorAddrsServer<' } } -struct PushBatchSignatureServer<'a>(&'a Network); +struct PushBatchVotesServer<'a>(&'a Network); #[async_trait::async_trait] -impl rpc::Handler for PushBatchSignatureServer<'_> { +impl rpc::Handler for PushBatchVotesServer<'_> { /// Here we bound the buffering of incoming batch messages. 
fn max_req_size(&self) -> usize { 100 * kB } - async fn handle( - &self, - _ctx: &ctx::Ctx, - req: rpc::push_batch_signature::Req, - ) -> anyhow::Result<()> { + async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::push_batch_votes::Req) -> anyhow::Result<()> { self.0 .batch_signatures .update(&self.0.genesis().attesters, &req.0) @@ -114,11 +110,11 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); - let push_signature_client = rpc::Client::::new( + let push_signature_client = rpc::Client::::new( ctx, self.cfg.rpc.push_batch_signature_rate, ); - let push_signature_server = PushBatchSignatureServer(self); + let push_signature_server = PushBatchVotesServer(self); let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); @@ -197,7 +193,7 @@ impl Network { continue; } old = new; - let req = rpc::push_batch_signature::Req(diff); + let req = rpc::push_batch_votes::Req(diff); push_signature_client.call(ctx, &req, kB).await?; } }); diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index 42d427ba..ba733fbc 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -18,7 +18,7 @@ message PushValidatorAddrs { repeated roles.validator.Signed net_addresses = 1; } -message PushBatchSignature { +message PushBatchVotes { // Signed roles.validator.Msg.net_address. 
repeated roles.attester.Signed signatures = 1; } diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs index 8b17ca53..7f339a4f 100644 --- a/node/actors/network/src/rpc/mod.rs +++ b/node/actors/network/src/rpc/mod.rs @@ -25,7 +25,7 @@ pub(crate) mod consensus; pub(crate) mod get_block; mod metrics; pub(crate) mod ping; -pub(crate) mod push_batch_signature; +pub(crate) mod push_batch_votes; pub(crate) mod push_block_store_state; pub(crate) mod push_validator_addrs; #[cfg(test)] diff --git a/node/actors/network/src/rpc/push_batch_signature.rs b/node/actors/network/src/rpc/push_batch_votes.rs similarity index 96% rename from node/actors/network/src/rpc/push_batch_signature.rs rename to node/actors/network/src/rpc/push_batch_votes.rs index b9d3c62e..d438445d 100644 --- a/node/actors/network/src/rpc/push_batch_signature.rs +++ b/node/actors/network/src/rpc/push_batch_votes.rs @@ -22,7 +22,7 @@ impl super::Rpc for Rpc { pub(crate) struct Req(pub(crate) Vec>>); impl ProtoFmt for Req { - type Proto = proto::PushBatchSignature; + type Proto = proto::PushBatchVotes; fn read(r: &Self::Proto) -> anyhow::Result { let mut signatures = vec![]; diff --git a/node/actors/network/src/rpc/tests.rs b/node/actors/network/src/rpc/tests.rs index f7cb7e72..4ac242f3 100644 --- a/node/actors/network/src/rpc/tests.rs +++ b/node/actors/network/src/rpc/tests.rs @@ -17,7 +17,7 @@ fn test_capability_rpc_correspondence() { push_block_store_state::Rpc::CAPABILITY_ID, get_block::Rpc::CAPABILITY_ID, ping::Rpc::CAPABILITY_ID, - push_batch_signature::Rpc::CAPABILITY_ID, + push_batch_votes::Rpc::CAPABILITY_ID, ]; assert_eq!(ids.len(), HashSet::from(ids).len()); } From b50fb48e7ea2cf5dcdb3ae447d401f8ce7282e3d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 18:06:23 -0300 Subject: [PATCH 54/79] Calculate correctly the expected weight for tests --- node/libs/roles/src/attester/tests.rs | 12 +++++++++--- node/libs/roles/src/validator/tests.rs | 17 
++++++++++++++--- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index b97cb9f2..3ec3313b 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -143,14 +143,20 @@ fn test_batch_qc() { let mut genesis3 = (*setup1.genesis).clone(); genesis3.attesters = Committee::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(); let genesis3 = genesis3.with_hash(); - let attester_weight = setup1.genesis.attesters.total_weight() / 6; for i in 0..setup1.attester_keys.len() + 1 { let mut qc = BatchQC::new(make_batch_msg(rng), &setup1.genesis); for key in &setup1.attester_keys[0..i] { - qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis); + qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis) + .unwrap(); } - let expected_weight = i as u64 * attester_weight; + let expected_weight: u64 = setup1 + .genesis + .attesters + .iter() + .take(i) + .map(|w| w.weight) + .sum(); if expected_weight >= setup1.genesis.attesters.threshold() { assert!(qc.verify(&setup1.genesis).is_ok()); } else { diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index b0bc1d28..337d0f1a 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -215,7 +215,6 @@ fn test_commit_qc() { genesis3.validators = Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(); let genesis3 = genesis3.with_hash(); - let validator_weight = setup1.genesis.validators.total_weight() / 6; for i in 0..setup1.validator_keys.len() + 1 { let view = rng.gen(); @@ -224,7 +223,13 @@ fn test_commit_qc() { qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis) .unwrap(); } - let expected_weight = i as u64 * validator_weight; + let expected_weight: u64 = setup1 + .genesis + .attesters + .iter() + .take(i) + .map(|w| w.weight) + .sum(); if expected_weight >= 
setup1.genesis.validators.threshold() { qc.verify(&setup1.genesis).unwrap(); } else { @@ -319,7 +324,13 @@ fn test_prepare_qc() { ) .unwrap(); } - let expected_weight = n as u64 * setup1.genesis.validators.total_weight() / 6; + let expected_weight: u64 = setup1 + .genesis + .attesters + .iter() + .take(n) + .map(|w| w.weight) + .sum(); if expected_weight >= setup1.genesis.validators.threshold() { qc.verify(&setup1.genesis).unwrap(); } else { From 0aa2131eb4397d15c06e1e1bffb26361871996f3 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 8 May 2024 18:14:18 -0300 Subject: [PATCH 55/79] Fix format and clippy lints --- node/actors/bft/src/testonly/ut_harness.rs | 18 ++---------------- node/actors/executor/src/lib.rs | 2 +- node/actors/network/src/gossip/mod.rs | 2 +- node/libs/roles/src/attester/testonly.rs | 1 - node/libs/roles/src/attester/tests.rs | 2 +- node/libs/roles/src/validator/conv.rs | 12 +++--------- .../src/validator/messages/leader_prepare.rs | 3 +-- 7 files changed, 9 insertions(+), 31 deletions(-) diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 8a5eb372..cc2cbb49 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -223,14 +223,7 @@ impl UTHarness { for (i, msg) in msgs.into_iter().enumerate() { let res = self.process_replica_prepare(ctx, msg).await; match ( - (i + 1) as u64 - * self - .genesis() - .validators - .iter() - .next() - .unwrap() - .weight + (i + 1) as u64 * self.genesis().validators.iter().next().unwrap().weight < self.genesis().validators.threshold(), first_match, ) { @@ -265,14 +258,7 @@ impl UTHarness { .leader .process_replica_commit(ctx, key.sign_msg(msg.clone())); match ( - (i + 1) as u64 - * self - .genesis() - .validators - .iter() - .next() - .unwrap() - .weight + (i + 1) as u64 * self.genesis().validators.iter().next().unwrap().weight < self.genesis().validators.threshold(), first_match, ) { diff --git 
a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 2a50d54c..db40171a 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -8,7 +8,7 @@ use std::{ use zksync_concurrency::{ctx, limiter, net, scope, time}; use zksync_consensus_bft as bft; use zksync_consensus_network as network; -use zksync_consensus_roles::{attester, node, validator}; +use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::{BlockStore, ReplicaStore}; use zksync_consensus_utils::pipe; use zksync_protobuf::kB; diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index efc8a852..7cac3afa 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -134,7 +134,7 @@ impl Network { .clone() .entry(sig.msg.number.clone()) .or_insert_with(|| attester::BatchQC::new(sig.msg.clone(), self.genesis())) - .add(&sig, self.genesis()); + .add(&sig, self.genesis())?; } // Now we check if we have enough weight to continue. 
if let Some(last_qc) = self.last_viewed_qc.clone() { diff --git a/node/libs/roles/src/attester/testonly.rs b/node/libs/roles/src/attester/testonly.rs index 3096e093..502b24a4 100644 --- a/node/libs/roles/src/attester/testonly.rs +++ b/node/libs/roles/src/attester/testonly.rs @@ -8,7 +8,6 @@ use rand::{ Rng, }; use std::sync::Arc; -use zksync_concurrency::time; use zksync_consensus_utils::enum_util::Variant; impl AggregateSignature { diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index 3ec3313b..d334ed55 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -3,7 +3,7 @@ use crate::validator::testonly::Setup; use super::*; use assert_matches::assert_matches; use rand::Rng; -use zksync_concurrency::{ctx, time}; +use zksync_concurrency::ctx; use zksync_consensus_crypto::{ByteFmt, Text, TextFmt}; use zksync_protobuf::testonly::test_encode_random; diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 91ccbc30..2a592cc4 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -39,10 +39,8 @@ impl ProtoFmt for GenesisRaw { first_block: BlockNumber(*required(&r.first_block).context("first_block")?), protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), - validators: Committee::new(validators.into_iter()) - .context("validators_v1")?, - attesters: attester::Committee::new(attesters.into_iter()) - .context("attesters")?, + validators: Committee::new(validators.into_iter()).context("validators_v1")?, + attesters: attester::Committee::new(attesters.into_iter()).context("attesters")?, leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) } @@ -53,11 +51,7 @@ impl ProtoFmt for GenesisRaw { first_block: Some(self.first_block.0), protocol_version: Some(self.protocol_version.0), - validators_v1: self - .validators - .iter() - .map(|v| v.build()) - 
.collect(), + validators_v1: self.validators.iter().map(|v| v.build()).collect(), attesters: self.attesters.iter().map(|v| v.build()).collect(), leader_selection: Some(self.leader_selection.build()), } diff --git a/node/libs/roles/src/validator/messages/leader_prepare.rs b/node/libs/roles/src/validator/messages/leader_prepare.rs index 306935b4..f452a594 100644 --- a/node/libs/roles/src/validator/messages/leader_prepare.rs +++ b/node/libs/roles/src/validator/messages/leader_prepare.rs @@ -79,8 +79,7 @@ impl PrepareQC { let mut count: HashMap<_, u64> = HashMap::new(); for (msg, signers) in &self.map { if let Some(v) = &msg.high_vote { - *count.entry(v.proposal).or_default() += - genesis.validators.weight(signers); + *count.entry(v.proposal).or_default() += genesis.validators.weight(signers); } } // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes. From 8168af734f7e9a8498a972b069ef8b0c197af659 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 10 May 2024 11:14:10 -0300 Subject: [PATCH 56/79] Add trait for the state keeper --- node/actors/network/src/lib.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index bcbb5dce..a496fe87 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -6,6 +6,7 @@ use zksync_concurrency::{ ctx::{self, channel}, limiter, scope, }; +use zksync_consensus_roles::attester; use zksync_consensus_storage::BlockStore; use zksync_consensus_utils::pipe::ActorPipe; @@ -29,6 +30,20 @@ mod watch; pub use config::*; +/// Trait for the shared state of batches between the consensus and the execution layer. +pub trait StateKeeper { + /// Get the L1 batch from storage with the highest number. + fn last_batch(&self) -> attester::BatchNumber; + /// Get the L1 batch QC from storage with the highest number. 
+ fn last_batch_qc(&self) -> attester::BatchNumber; + /// Returns the batch with the given number. + fn get_batch(&self, number: attester::BatchNumber) -> Option; + /// Returns the QC of the batch with the given number. + fn get_batch_qc(&self, number: attester::BatchNumber) -> Option; + /// Store the given QC in the storage. + fn store_qc(&self, qc: attester::BatchQC); +} + /// State of the network actor observable outside of the actor. pub struct Network { /// Consensus network state. From f380e74d6c4e32a61fecf2a853f8bcb7cf49cbbc Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 10 May 2024 17:00:32 -0300 Subject: [PATCH 57/79] Use calculated QC by the node instead of taking it from signers --- node/actors/network/src/gossip/mod.rs | 76 +++++++++++++++++---------- 1 file changed, 48 insertions(+), 28 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 7cac3afa..ba8e337e 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -129,38 +129,58 @@ impl Network { .await .context("batch signatures")? .clone(); + + // Check next QC to collect signatures for. + let new_qc = self + .last_viewed_qc + .clone() + .and_then(|qc| { + Some(attester::BatchQC::new( + attester::Batch { + number: qc.message.number.next(), + }, + self.genesis(), + )) + }) + .clone() + .unwrap_or_else(|| { + attester::BatchQC::new( + attester::Batch { + number: attester::BatchNumber(0), + }, + self.genesis(), + ) + }); + + // Check signatures for the correct QC. for (_, sig) in signatures.0 { - self.batch_qc + if self + .batch_qc .clone() - .entry(sig.msg.number.clone()) - .or_insert_with(|| attester::BatchQC::new(sig.msg.clone(), self.genesis())) - .add(&sig, self.genesis())?; - } - // Now we check if we have enough weight to continue. 
- if let Some(last_qc) = self.last_viewed_qc.clone() { - let weight = self.genesis().attesters.weight( - &self - .batch_qc - .get(&last_qc.message.number.next()) - .context("last qc")? - .signers, - ); - if weight < self.genesis().attesters.threshold() { - return Ok(()); - }; - } else { - let weight = self.genesis().attesters.weight( - &self - .batch_qc - .get(&attester::BatchNumber(0)) - .context("L1 batch QC")? - .signers, - ); - if weight < self.genesis().attesters.threshold() { - return Ok(()); - }; + .entry(new_qc.message.number.clone()) + .or_insert_with(|| { + attester::BatchQC::new(new_qc.message.clone(), self.genesis()) + }) + .add(&sig, self.genesis()) + .is_err() + { + // TODO: Should we ban the peer somehow? + continue; + } } + let weight = self.genesis().attesters.weight( + &self + .batch_qc + .get(&new_qc.message.number) + .context("last qc")? + .signers, + ); + + if weight < self.genesis().attesters.threshold() { + return Ok(()); + }; + // If we have enough weight, we can propagate the QC. } } From c9f615257ed957cae3c426fe213b32cf63b0a31e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 10 May 2024 17:03:29 -0300 Subject: [PATCH 58/79] Add extra comments on QC updater --- node/actors/network/src/gossip/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index ba8e337e..05a4a044 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -120,7 +120,7 @@ impl Network { /// Task that keeps hearing about new signatures and updates the L1 batch qc. /// It will propagate the QC if there's enough signatures. - pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub(crate) async fn update_batch_qc(&mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { // FIXME This is not a good way to do this, we shouldn't be verifying the QC every time // Can we get only the latest signatures? 
loop { @@ -181,7 +181,7 @@ impl Network { return Ok(()); }; - // If we have enough weight, we can propagate the QC. + // If we have enough weight, we can update the last viewed QC and propagate it. } } } From eabc7d7116415cf2b59f4b4f6f6fc63185859fe8 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 15:09:04 -0300 Subject: [PATCH 59/79] Panic if overflow happens for batch number --- node/actors/network/src/gossip/mod.rs | 2 +- node/libs/roles/src/attester/messages/batch.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 05a4a044..3d8cb798 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -120,7 +120,7 @@ impl Network { /// Task that keeps hearing about new signatures and updates the L1 batch qc. /// It will propagate the QC if there's enough signatures. - pub(crate) async fn update_batch_qc(&mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { // FIXME This is not a good way to do this, we shouldn't be verifying the QC every time // Can we get only the latest signatures? loop { diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index 2c772ea4..9e26b533 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -10,7 +10,7 @@ pub struct BatchNumber(pub u64); impl BatchNumber { /// Increment the batch number. 
pub fn next(&self) -> BatchNumber { - BatchNumber(self.0.checked_add(1).unwrap_or(0)) + BatchNumber(self.0 + 1) } } From e163378ea06449ac46ccea46a544d0e15bf7f7e9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 15:20:02 -0300 Subject: [PATCH 60/79] Use validator function for max faulty weight --- node/libs/roles/src/attester/messages/msg.rs | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index 42d2722b..21837608 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, fmt}; -use crate::attester; +use crate::{attester, validator}; use anyhow::Context as _; use bit_vec::BitVec; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; @@ -164,28 +164,13 @@ impl Committee { pub fn total_weight(&self) -> u64 { self.total_weight } - - /// Maximal weight of faulty replicas allowed in this attester committee. - pub fn max_faulty_weight(&self) -> u64 { - max_faulty_weight(self.total_weight()) - } } /// Calculate the attester threshold, that is the minimum votes weight for any attesters action to be valid, /// for a given committee total weight. /// Technically we need just n > f+1, but for now we use a threshold consistent with the validator committee. pub fn threshold(total_weight: u64) -> u64 { - total_weight - max_faulty_weight(total_weight) -} - -/// Calculate the maximum allowed weight for faulty replicas, for a given total weight. -pub fn max_faulty_weight(total_weight: u64) -> u64 { - // Calculate the allowed maximum weight of faulty replicas. We want the following relationship to hold: - // n = 5*f + 1 - // for n total weight and f faulty weight. 
This results in the following formula for the maximum - // weight of faulty replicas: - // f = floor((n - 1) / 5) - (total_weight - 1) / 5 + total_weight - validator::max_faulty_weight(total_weight) } impl std::ops::BitOrAssign<&Self> for Signers { From 246d834d49cc84a7b4aebfc03695470f8eadeb63 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 15:44:08 -0300 Subject: [PATCH 61/79] Use batch votes instead of batch signatures --- node/actors/network/src/config.rs | 6 ++-- .../{batch_signatures.rs => batch_votes.rs} | 28 +++++++++---------- node/actors/network/src/gossip/mod.rs | 28 +++++++++---------- node/actors/network/src/gossip/runner.rs | 18 ++++++------ node/actors/network/src/gossip/tests/mod.rs | 27 ++++++++---------- node/actors/network/src/proto/gossip.proto | 2 +- .../network/src/rpc/push_batch_votes.rs | 6 ++-- 7 files changed, 55 insertions(+), 60 deletions(-) rename node/actors/network/src/gossip/{batch_signatures.rs => batch_votes.rs} (83%) diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs index 30326c0b..c7bf8a3e 100644 --- a/node/actors/network/src/config.rs +++ b/node/actors/network/src/config.rs @@ -20,8 +20,8 @@ pub struct RpcConfig { pub get_block_timeout: Option, /// Max rate of sending/receiving consensus messages. pub consensus_rate: limiter::Rate, - /// Max rate of sending/receiving l1 batch signature messages. - pub push_batch_signature_rate: limiter::Rate, + /// Max rate of sending/receiving l1 batch votes messages. 
+ pub push_batch_votes_rate: limiter::Rate, } impl Default for RpcConfig { @@ -44,7 +44,7 @@ impl Default for RpcConfig { burst: 10, refresh: time::Duration::ZERO, }, - push_batch_signature_rate: limiter::Rate { + push_batch_votes_rate: limiter::Rate { burst: 2, refresh: time::Duration::milliseconds(500), }, diff --git a/node/actors/network/src/gossip/batch_signatures.rs b/node/actors/network/src/gossip/batch_votes.rs similarity index 83% rename from node/actors/network/src/gossip/batch_signatures.rs rename to node/actors/network/src/gossip/batch_votes.rs index 1e8a5535..23a2164f 100644 --- a/node/actors/network/src/gossip/batch_signatures.rs +++ b/node/actors/network/src/gossip/batch_votes.rs @@ -7,11 +7,11 @@ use zksync_consensus_roles::attester::{self, Batch}; /// Mapping from attester::PublicKey to a signed attester::Batch message. /// Represents the currents state of node's knowledge about the attester signatures. #[derive(Clone, Default, PartialEq, Eq)] -pub(crate) struct BatchSignatures( +pub(crate) struct BatchVotes( pub(super) im::HashMap>>, ); -impl BatchSignatures { +impl BatchVotes { /// Returns a set of entries of `self` which are newer than the entries in `b`. pub(super) fn get_newer(&self, b: &Self) -> Vec>> { let mut newer = vec![]; @@ -66,23 +66,23 @@ impl BatchSignatures { } } -/// Watch wrapper of BatchSignatures, -/// which supports subscribing to BatchSignatures updates. -pub(crate) struct BatchSignaturesWatch(Watch); +/// Watch wrapper of BatchVotes, +/// which supports subscribing to BatchVotes updates. +pub(crate) struct BatchVotesWatch(Watch); -impl Default for BatchSignaturesWatch { +impl Default for BatchVotesWatch { fn default() -> Self { - Self(Watch::new(BatchSignatures::default())) + Self(Watch::new(BatchVotes::default())) } } -impl BatchSignaturesWatch { - /// Subscribes to BatchSignatures updates. - pub(crate) fn subscribe(&self) -> sync::watch::Receiver { +impl BatchVotesWatch { + /// Subscribes to BatchVotes updates. 
+ pub(crate) fn subscribe(&self) -> sync::watch::Receiver { self.0.subscribe() } - /// Inserts data to BatchSignatures. + /// Inserts data to BatchVotes. /// Subscribers are notified iff at least 1 new entry has /// been inserted. Returns an error iff an invalid /// entry in `data` has been found. The provider of the @@ -93,9 +93,9 @@ impl BatchSignaturesWatch { data: &[Arc>], ) -> anyhow::Result<()> { let this = self.0.lock().await; - let mut signatures = this.borrow().clone(); - if signatures.update(attesters, data)? { - this.send_replace(signatures); + let mut votes = this.borrow().clone(); + if votes.update(attesters, data)? { + this.send_replace(votes); } Ok(()) } diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 3d8cb798..69df5b5c 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -21,9 +21,9 @@ use zksync_concurrency::{ctx, ctx::channel, scope, sync}; use zksync_consensus_roles::{attester, node, validator}; use zksync_consensus_storage::BlockStore; -use self::batch_signatures::BatchSignaturesWatch; +use self::batch_votes::BatchVotesWatch; -mod batch_signatures; +mod batch_votes; mod fetch; mod handshake; mod runner; @@ -43,8 +43,8 @@ pub(crate) struct Network { pub(crate) outbound: PoolWatch, /// Current state of knowledge about validators' endpoints. pub(crate) validator_addrs: ValidatorAddrsWatch, - /// Current state of knowledge about batch signatures. - pub(crate) batch_signatures: BatchSignaturesWatch, + /// Current state of knowledge about batch votes. + pub(crate) batch_votes: BatchVotesWatch, /// Block store to serve `get_block` requests from. pub(crate) block_store: Arc, /// Output pipe of the network actor. 
@@ -74,7 +74,7 @@ impl Network { ), outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), validator_addrs: ValidatorAddrsWatch::default(), - batch_signatures: BatchSignaturesWatch::default(), + batch_votes: BatchVotesWatch::default(), batch_qc: HashMap::new(), last_viewed_qc: None, cfg, @@ -118,16 +118,16 @@ impl Network { .await; } - /// Task that keeps hearing about new signatures and updates the L1 batch qc. - /// It will propagate the QC if there's enough signatures. + /// Task that keeps hearing about new votes and updates the L1 batch qc. + /// It will propagate the QC if there's enough votes. pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { - // FIXME This is not a good way to do this, we shouldn't be verifying the QC every time - // Can we get only the latest signatures? + // TODO This is not a good way to do this, we shouldn't be verifying the QC every time + // Can we get only the latest votes? loop { - let mut sub = self.batch_signatures.subscribe(); - let signatures = sync::changed(ctx, &mut sub) + let mut sub = self.batch_votes.subscribe(); + let votes = sync::changed(ctx, &mut sub) .await - .context("batch signatures")? + .context("batch votes")? .clone(); // Check next QC to collect signatures for. @@ -152,8 +152,8 @@ impl Network { ) }); - // Check signatures for the correct QC. - for (_, sig) in signatures.0 { + // Check votes for the correct QC. 
+ for (_, sig) in votes.0 { if self .batch_qc .clone() diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 4395edfe..45a436ae 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -1,4 +1,4 @@ -use super::{batch_signatures::BatchSignatures, handshake, Network, ValidatorAddrs}; +use super::{batch_votes::BatchVotes, handshake, Network, ValidatorAddrs}; use crate::{noise, preface, rpc}; use anyhow::Context as _; use async_trait::async_trait; @@ -43,7 +43,7 @@ impl rpc::Handler for PushBatchVotesServer<'_> { async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::push_batch_votes::Req) -> anyhow::Result<()> { self.0 - .batch_signatures + .batch_votes .update(&self.0.genesis().attesters, &req.0) .await?; Ok(()) @@ -110,10 +110,8 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); - let push_signature_client = rpc::Client::::new( - ctx, - self.cfg.rpc.push_batch_signature_rate, - ); + let push_signature_client = + rpc::Client::::new(ctx, self.cfg.rpc.push_batch_votes_rate); let push_signature_server = PushBatchVotesServer(self); let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = @@ -130,7 +128,7 @@ impl Network { .add_server( ctx, push_signature_server, - self.cfg.rpc.push_batch_signature_rate, + self.cfg.rpc.push_batch_votes_rate, ) .add_client(&push_block_store_state_client) .add_server( @@ -181,10 +179,10 @@ impl Network { } }); - // Push L1 batch signatures updates to peer. + // Push L1 batch votes updates to peer. 
s.spawn::<()>(async { - let mut old = BatchSignatures::default(); - let mut sub = self.batch_signatures.subscribe(); + let mut old = BatchVotes::default(); + let mut sub = self.batch_votes.subscribe(); sub.mark_changed(); loop { let new = sync::changed(ctx, &mut sub).await?.clone(); diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 8fdf1db0..a75ba053 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -121,7 +121,7 @@ fn random_netaddr( )) } -fn random_batch_signature( +fn random_batch_votes( rng: &mut R, key: &attester::SecretKey, ) -> Arc> { @@ -513,7 +513,7 @@ async fn rate_limiting() { } #[tokio::test] -async fn test_batch_signatures() { +async fn test_batch_votes() { abort_on_panic(); let rng = &mut ctx::test_root(&ctx::RealClock).rng(); @@ -523,15 +523,15 @@ async fn test_batch_signatures() { weight: 1250, })) .unwrap(); - let signatures = BatchSignaturesWatch::default(); - let mut sub = signatures.subscribe(); + let votes = BatchVotesWatch::default(); + let mut sub = votes.subscribe(); // Initial values. 
let mut want = Signatures::default(); for k in &keys[0..6] { - want.insert(random_batch_signature(rng, k)); + want.insert(random_batch_votes(rng, k)); } - signatures.update(&attesters, &want.as_vec()).await.unwrap(); + votes.update(&attesters, &want.as_vec()).await.unwrap(); assert_eq!(want.0, sub.borrow_and_update().0); // newer batch number @@ -541,10 +541,10 @@ async fn test_batch_signatures() { // older batch number let k4v2 = update_signature(rng, &want.get(&keys[4]).msg, &keys[4], -1); // first entry for a key in the config - let k6v1 = random_batch_signature(rng, &keys[6]); + let k6v1 = random_batch_votes(rng, &keys[6]); // entry for a key outside of the config let k8 = rng.gen(); - let k8v1 = random_batch_signature(rng, &k8); + let k8v1 = random_batch_votes(rng, &k8); want.insert(k0v2.clone()); want.insert(k1v2.clone()); @@ -558,20 +558,17 @@ async fn test_batch_signatures() { // no entry at all for keys[7] k8v1.clone(), ]; - signatures.update(&attesters, &update).await.unwrap(); + votes.update(&attesters, &update).await.unwrap(); assert_eq!(want.0, sub.borrow_and_update().0); // Invalid signature. let mut k0v3 = mk_batch(&keys[1], attester::BatchNumber(rng.gen_range(0..1000))); k0v3.key = keys[0].public(); - assert!(signatures - .update(&attesters, &[Arc::new(k0v3)]) - .await - .is_err()); + assert!(votes.update(&attesters, &[Arc::new(k0v3)]).await.is_err()); assert_eq!(want.0, sub.borrow_and_update().0); // Duplicate entry in the update. - assert!(signatures + assert!(votes .update(&attesters, &[k8v1.clone(), k8v1]) .await .is_err()); @@ -581,5 +578,5 @@ async fn test_batch_signatures() { // TODO: This test is disabled because the logic for attesters to receive and sign batches is not implemented yet. // It should be re-enabled once the logic is implemented. 
// #[tokio::test(flavor = "multi_thread")] -// async fn test_batch_signatures_propagation() { +// async fn test_batch_votes_propagation() { // } diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index ba733fbc..c82672bd 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -19,7 +19,7 @@ message PushValidatorAddrs { } message PushBatchVotes { - // Signed roles.validator.Msg.net_address. + // Signed roles.validator.Msg.signatures repeated roles.attester.Signed signatures = 1; } diff --git a/node/actors/network/src/rpc/push_batch_votes.rs b/node/actors/network/src/rpc/push_batch_votes.rs index d438445d..2021b130 100644 --- a/node/actors/network/src/rpc/push_batch_votes.rs +++ b/node/actors/network/src/rpc/push_batch_votes.rs @@ -25,13 +25,13 @@ impl ProtoFmt for Req { type Proto = proto::PushBatchVotes; fn read(r: &Self::Proto) -> anyhow::Result { - let mut signatures = vec![]; + let mut votes = vec![]; for (i, e) in r.signatures.iter().enumerate() { - signatures.push(Arc::new( + votes.push(Arc::new( ProtoFmt::read(e).with_context(|| format!("signatures[{i}]"))?, )); } - Ok(Self(signatures)) + Ok(Self(votes)) } fn build(&self) -> Self::Proto { From 2d331ef48f2f74c6c030dafd133177ff69ff0f04 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 16:06:22 -0300 Subject: [PATCH 62/79] Update some names referring to signatures changing it to votes --- node/actors/network/src/gossip/batch_votes.rs | 2 +- node/actors/network/src/gossip/mod.rs | 2 +- node/actors/network/src/proto/gossip.proto | 4 ++-- node/actors/network/src/rpc/push_batch_votes.rs | 10 +++++----- node/libs/roles/src/lib.rs | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/node/actors/network/src/gossip/batch_votes.rs b/node/actors/network/src/gossip/batch_votes.rs index 23a2164f..7fb0100f 100644 --- a/node/actors/network/src/gossip/batch_votes.rs +++ 
b/node/actors/network/src/gossip/batch_votes.rs @@ -5,7 +5,7 @@ use zksync_concurrency::sync; use zksync_consensus_roles::attester::{self, Batch}; /// Mapping from attester::PublicKey to a signed attester::Batch message. -/// Represents the currents state of node's knowledge about the attester signatures. +/// Represents the currents state of node's knowledge about the attester votes. #[derive(Clone, Default, PartialEq, Eq)] pub(crate) struct BatchVotes( pub(super) im::HashMap>>, diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 69df5b5c..02530ea6 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -130,7 +130,7 @@ impl Network { .context("batch votes")? .clone(); - // Check next QC to collect signatures for. + // Check next QC to collect votes for. let new_qc = self .last_viewed_qc .clone() diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index c82672bd..f2bd6e0f 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -19,8 +19,8 @@ message PushValidatorAddrs { } message PushBatchVotes { - // Signed roles.validator.Msg.signatures - repeated roles.attester.Signed signatures = 1; + // Signed roles.validator.Msg.votes + repeated roles.attester.Signed votes = 1; } // State of the local block store. diff --git a/node/actors/network/src/rpc/push_batch_votes.rs b/node/actors/network/src/rpc/push_batch_votes.rs index 2021b130..35c3762c 100644 --- a/node/actors/network/src/rpc/push_batch_votes.rs +++ b/node/actors/network/src/rpc/push_batch_votes.rs @@ -6,13 +6,13 @@ use anyhow::Context as _; use zksync_consensus_roles::attester::{self, Batch}; use zksync_protobuf::ProtoFmt; -/// Signature RPC. +/// PushBatchVotes RPC. 
pub(crate) struct Rpc; impl super::Rpc for Rpc { const CAPABILITY_ID: mux::CapabilityId = 5; const INFLIGHT: u32 = 1; - const METHOD: &'static str = "push_signature"; + const METHOD: &'static str = "push_batch_votes"; type Req = Req; type Resp = (); } @@ -26,9 +26,9 @@ impl ProtoFmt for Req { fn read(r: &Self::Proto) -> anyhow::Result { let mut votes = vec![]; - for (i, e) in r.signatures.iter().enumerate() { + for (i, e) in r.votes.iter().enumerate() { votes.push(Arc::new( - ProtoFmt::read(e).with_context(|| format!("signatures[{i}]"))?, + ProtoFmt::read(e).with_context(|| format!("votes[{i}]"))?, )); } Ok(Self(votes)) @@ -36,7 +36,7 @@ impl ProtoFmt for Req { fn build(&self) -> Self::Proto { Self::Proto { - signatures: self.0.iter().map(|a| ProtoFmt::build(a.as_ref())).collect(), + votes: self.0.iter().map(|a| ProtoFmt::build(a.as_ref())).collect(), } } } diff --git a/node/libs/roles/src/lib.rs b/node/libs/roles/src/lib.rs index 7a5e9366..38457858 100644 --- a/node/libs/roles/src/lib.rs +++ b/node/libs/roles/src/lib.rs @@ -6,7 +6,7 @@ //! - `Validator`: a node that participates in the consensus protocol, so it votes for blocks and produces blocks. //! It also participates in the validator network, which is a mesh network just for validators. Not //! every node has this role. -//! - `Attester`: a node that signs the L1 batches and broadcasts the signatures to the gossip network. +//! - `Attester`: a node that signs the L1 batches and broadcasts the signatures known as votes to the gossip network. //! Not every node has this role. 
pub mod attester; From 7eee7dc569997a17bf4d0fd32de2e366ebb0c202 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 16:07:54 -0300 Subject: [PATCH 63/79] Move trait to persist batches to the storage workspace --- node/actors/network/src/lib.rs | 14 -------------- node/libs/storage/src/batch_store.rs | 15 +++++++++++++++ node/libs/storage/src/lib.rs | 2 ++ 3 files changed, 17 insertions(+), 14 deletions(-) create mode 100644 node/libs/storage/src/batch_store.rs diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index a496fe87..d9573a46 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -30,20 +30,6 @@ mod watch; pub use config::*; -/// Trait for the shared state of batches between the consensus and the execution layer. -pub trait StateKeeper { - /// Get the L1 batch from storage with the highest number. - fn last_batch(&self) -> attester::BatchNumber; - /// Get the L1 batch QC from storage with the highest number. - fn last_batch_qc(&self) -> attester::BatchNumber; - /// Returns the batch with the given number. - fn get_batch(&self, number: attester::BatchNumber) -> Option; - /// Returns the QC of the batch with the given number. - fn get_batch_qc(&self, number: attester::BatchNumber) -> Option; - /// Store the given QC in the storage. - fn store_qc(&self, qc: attester::BatchQC); -} - /// State of the network actor observable outside of the actor. pub struct Network { /// Consensus network state. diff --git a/node/libs/storage/src/batch_store.rs b/node/libs/storage/src/batch_store.rs new file mode 100644 index 00000000..4129b71b --- /dev/null +++ b/node/libs/storage/src/batch_store.rs @@ -0,0 +1,15 @@ +use zksync_consensus_roles::attester; + +/// Trait for the shared state of batches between the consensus and the execution layer. +pub trait PersistentBatchStore { + /// Get the L1 batch from storage with the highest number. 
+ fn last_batch(&self) -> attester::BatchNumber; + /// Get the L1 batch QC from storage with the highest number. + fn last_batch_qc(&self) -> attester::BatchNumber; + /// Returns the batch with the given number. + fn get_batch(&self, number: attester::BatchNumber) -> Option; + /// Returns the QC of the batch with the given number. + fn get_batch_qc(&self, number: attester::BatchNumber) -> Option; + /// Store the given QC in the storage. + fn store_qc(&self, qc: attester::BatchQC); +} diff --git a/node/libs/storage/src/lib.rs b/node/libs/storage/src/lib.rs index ee017752..33497156 100644 --- a/node/libs/storage/src/lib.rs +++ b/node/libs/storage/src/lib.rs @@ -1,5 +1,6 @@ //! Abstraction for persistent data storage. //! It provides schema-aware type-safe database access. +mod batch_store; mod block_store; pub mod proto; mod replica_store; @@ -8,6 +9,7 @@ pub mod testonly; mod tests; pub use crate::{ + batch_store::PersistentBatchStore, block_store::{BlockStore, BlockStoreRunner, BlockStoreState, PersistentBlockStore}, replica_store::{Proposal, ReplicaState, ReplicaStore}, }; From fab01d52cc6d9c28b7b5d89a48e9c249eb1f3713 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 16:49:25 -0300 Subject: [PATCH 64/79] Implement deref for committees --- node/libs/roles/src/attester/messages/batch.rs | 2 +- node/libs/roles/src/attester/messages/msg.rs | 15 +++++++++------ node/libs/roles/src/attester/tests.rs | 1 + .../roles/src/validator/messages/consensus.rs | 13 ++++++++----- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index 9e26b533..fb856597 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -116,7 +116,7 @@ impl BatchQC { let messages_and_keys = genesis .attesters - .iter_keys() + .keys() .enumerate() .filter(|(i, _)| self.signers.0[*i]) .map(|(_, pk)| (self.message.clone(), pk)); 
diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index 21837608..5501cae9 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -79,6 +79,14 @@ pub struct Committee { total_weight: u64, } +impl std::ops::Deref for Committee { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.vec + } +} + impl Committee { /// Creates a new Committee from a list of attester public keys. pub fn new(attesters: impl IntoIterator) -> anyhow::Result { @@ -113,13 +121,8 @@ impl Committee { }) } - /// Iterates over weighted attesters. - pub fn iter(&self) -> impl Iterator { - self.vec.iter() - } - /// Iterates over attester keys. - pub fn iter_keys(&self) -> impl Iterator { + pub fn keys(&self) -> impl Iterator { self.vec.iter().map(|v| &v.key) } diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index d334ed55..a86d890b 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -150,6 +150,7 @@ fn test_batch_qc() { qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis) .unwrap(); } + let expected_weight: u64 = setup1 .genesis .attesters diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index bbae90de..ffd3829d 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -86,6 +86,14 @@ pub struct Committee { total_weight: u64, } +impl std::ops::Deref for Committee { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.vec + } +} + impl Committee { /// Creates a new Committee from a list of validator public keys. pub fn new(validators: impl IntoIterator) -> anyhow::Result { @@ -118,11 +126,6 @@ impl Committee { }) } - /// Iterates over weighted validators. 
- pub fn iter(&self) -> impl Iterator { - self.vec.iter() - } - /// Iterates over validator keys. pub fn keys(&self) -> impl Iterator { self.vec.iter().map(|v| &v.key) From c6fe4dbef93a9168b773685e005290a7a8c17797 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 16:50:53 -0300 Subject: [PATCH 65/79] Use map instead of and_then --- node/actors/network/src/gossip/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 02530ea6..269f8b8c 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -134,13 +134,13 @@ impl Network { let new_qc = self .last_viewed_qc .clone() - .and_then(|qc| { - Some(attester::BatchQC::new( + .map(|qc| { + attester::BatchQC::new( attester::Batch { number: qc.message.number.next(), }, self.genesis(), - )) + ) }) .clone() .unwrap_or_else(|| { From 3cec7be24e5a88b344befd6d0795f23ffdea731e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 13 May 2024 17:03:50 -0300 Subject: [PATCH 66/79] Fix clippy --- node/actors/network/src/lib.rs | 1 - node/libs/storage/src/batch_store.rs | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index d9573a46..bcbb5dce 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -6,7 +6,6 @@ use zksync_concurrency::{ ctx::{self, channel}, limiter, scope, }; -use zksync_consensus_roles::attester; use zksync_consensus_storage::BlockStore; use zksync_consensus_utils::pipe::ActorPipe; diff --git a/node/libs/storage/src/batch_store.rs b/node/libs/storage/src/batch_store.rs index 4129b71b..f885f82d 100644 --- a/node/libs/storage/src/batch_store.rs +++ b/node/libs/storage/src/batch_store.rs @@ -1,3 +1,4 @@ +//! Defines storage layer for batches of blocks. 
use zksync_consensus_roles::attester; /// Trait for the shared state of batches between the consensus and the execution layer. From 55feb75e4b31a05ad01d4cf08b91293e49c74f49 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 14 May 2024 11:37:45 -0300 Subject: [PATCH 67/79] Fix some unused imports --- node/actors/bft/src/leader/state_machine.rs | 12 +++++------- node/actors/network/src/gossip/testonly.rs | 1 - node/actors/network/src/gossip/tests/mod.rs | 1 - 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs index 0f120aab..7b99504c 100644 --- a/node/actors/bft/src/leader/state_machine.rs +++ b/node/actors/bft/src/leader/state_machine.rs @@ -199,13 +199,11 @@ impl StateMachine { // Broadcast the leader prepare message to all replicas (ourselves included). let msg = cfg .secret_key - .sign_msg(validator::ConsensusMsg::LeaderPrepare( - validator::LeaderPrepare { - proposal, - proposal_payload: payload, - justification, - }, - )); + .sign_msg(ConsensusMsg::LeaderPrepare(validator::LeaderPrepare { + proposal, + proposal_payload: payload, + justification, + })); pipe.send( ConsensusInputMessage { message: msg, diff --git a/node/actors/network/src/gossip/testonly.rs b/node/actors/network/src/gossip/testonly.rs index fcf8bee4..18e544e0 100644 --- a/node/actors/network/src/gossip/testonly.rs +++ b/node/actors/network/src/gossip/testonly.rs @@ -1,7 +1,6 @@ #![allow(dead_code)] use super::*; use crate::{frame, mux, noise, preface, rpc, Config, GossipConfig}; -use anyhow::Context as _; use rand::Rng as _; use std::collections::BTreeMap; use zksync_concurrency::{ctx, limiter}; diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index a75ba053..5fb208e1 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -1,6 +1,5 @@ use super::*; use crate::{metrics, preface, rpc, 
testonly}; -use anyhow::Context as _; use assert_matches::assert_matches; use pretty_assertions::assert_eq; use rand::Rng; From 70d8c918a705b38c6d516d2a41276b6da53ce1ab Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 14 May 2024 17:31:25 -0300 Subject: [PATCH 68/79] Make attesters field in genesis optional --- node/actors/network/src/gossip/mod.rs | 34 ++++++++++----- node/actors/network/src/gossip/runner.rs | 5 ++- node/actors/network/src/gossip/tests/mod.rs | 4 +- node/actors/network/src/lib.rs | 1 + .../libs/roles/src/attester/messages/batch.rs | 25 ++++++----- node/libs/roles/src/attester/tests.rs | 31 +++++++++++--- node/libs/roles/src/validator/conv.rs | 8 +++- .../roles/src/validator/messages/consensus.rs | 2 +- .../roles/src/validator/messages/tests.rs | 41 ++++++++++++++++--- node/libs/roles/src/validator/testonly.rs | 3 +- node/libs/roles/src/validator/tests.rs | 4 ++ 11 files changed, 119 insertions(+), 39 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 269f8b8c..b40b2f8c 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -142,7 +142,6 @@ impl Network { self.genesis(), ) }) - .clone() .unwrap_or_else(|| { attester::BatchQC::new( attester::Batch { @@ -150,7 +149,8 @@ impl Network { }, self.genesis(), ) - }); + }) + .context("new qc")?; // Check votes for the correct QC. for (_, sig) in votes.0 { @@ -159,7 +159,7 @@ impl Network { .clone() .entry(new_qc.message.number.clone()) .or_insert_with(|| { - attester::BatchQC::new(new_qc.message.clone(), self.genesis()) + attester::BatchQC::new(new_qc.message.clone(), self.genesis()).expect("qc") }) .add(&sig, self.genesis()) .is_err() @@ -169,15 +169,27 @@ impl Network { } } - let weight = self.genesis().attesters.weight( - &self - .batch_qc - .get(&new_qc.message.number) - .context("last qc")? - .signers, - ); + let weight = self + .genesis() + .attesters + .as_ref() + .context("attesters")? 
+ .weight( + &self + .batch_qc + .get(&new_qc.message.number) + .context("last qc")? + .signers, + ); - if weight < self.genesis().attesters.threshold() { + if weight + < self + .genesis() + .attesters + .as_ref() + .context("attesters")? + .threshold() + { return Ok(()); }; diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 45a436ae..ede9c0e9 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -44,7 +44,10 @@ impl rpc::Handler for PushBatchVotesServer<'_> { async fn handle(&self, _ctx: &ctx::Ctx, req: rpc::push_batch_votes::Req) -> anyhow::Result<()> { self.0 .batch_votes - .update(&self.0.genesis().attesters, &req.0) + .update( + self.0.genesis().attesters.as_ref().context("attesters")?, + &req.0, + ) .await?; Ok(()) } diff --git a/node/actors/network/src/gossip/tests/mod.rs b/node/actors/network/src/gossip/tests/mod.rs index 5fb208e1..b16eefee 100644 --- a/node/actors/network/src/gossip/tests/mod.rs +++ b/node/actors/network/src/gossip/tests/mod.rs @@ -535,7 +535,7 @@ async fn test_batch_votes() { // newer batch number let k0v2 = update_signature(rng, &want.get(&keys[0]).msg, &keys[0], 1); - // same batch number, newer timestamp + // same batch number let k1v2 = update_signature(rng, &want.get(&keys[1]).msg, &keys[1], 0); // older batch number let k4v2 = update_signature(rng, &want.get(&keys[4]).msg, &keys[4], -1); @@ -562,7 +562,7 @@ async fn test_batch_votes() { // Invalid signature. 
let mut k0v3 = mk_batch(&keys[1], attester::BatchNumber(rng.gen_range(0..1000))); - k0v3.key = keys[0].public(); + k0v3.sig = rng.gen(); assert!(votes.update(&attesters, &[Arc::new(k0v3)]).await.is_err()); assert_eq!(want.0, sub.borrow_and_update().0); diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index bcbb5dce..e7fc7e59 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -124,6 +124,7 @@ impl Runner { // Update QC batches in the background. s.spawn(async { + // TODO: Handle this correctly. let _ = self.net.gossip.update_batch_qc(ctx).await; Ok(()) }); diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index fb856597..bc1a02fa 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -1,7 +1,7 @@ use crate::{attester, validator::Genesis}; use super::{Signed, Signers}; -use anyhow::ensure; +use anyhow::{ensure, Context as _}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, PartialOrd)] /// A batch number. @@ -73,12 +73,12 @@ pub enum BatchQCAddError { impl BatchQC { /// Create a new empty instance for a given `Batch` message. - pub fn new(message: Batch, genesis: &Genesis) -> Self { - Self { + pub fn new(message: Batch, genesis: &Genesis) -> anyhow::Result { + Ok(Self { message, - signers: Signers::new(genesis.attesters.len()), + signers: Signers::new(genesis.attesters.as_ref().context("attesters")?.len()), signature: attester::AggregateSignature::default(), - } + }) } /// Add a attester's signature. @@ -88,6 +88,8 @@ impl BatchQC { ensure!(self.message == msg.msg, Error::InconsistentMessages); let i = genesis .attesters + .as_ref() + .expect("attesters set is empty in genesis") // This case should never happen .index(&msg.key) .ok_or(Error::SignerNotInCommittee { signer: Box::new(msg.key.clone()), @@ -101,12 +103,16 @@ impl BatchQC { /// Verifies the signature of the BatchQC. 
pub fn verify(&self, genesis: &Genesis) -> Result<(), BatchQCVerifyError> { use BatchQCVerifyError as Error; - if self.signers.len() != genesis.attesters.len() { + let attesters = genesis + .attesters + .as_ref() + .expect("attesters set is empty in genesis"); // This case should never happen + if self.signers.len() != attesters.len() { return Err(Error::BadSignersSet); } // Verify that the signer's weight is sufficient. - let weight = genesis.attesters.weight(&self.signers); - let threshold = genesis.attesters.threshold(); + let weight = attesters.weight(&self.signers); + let threshold = attesters.threshold(); if weight < threshold { return Err(Error::NotEnoughSigners { got: weight, @@ -114,8 +120,7 @@ impl BatchQC { }); } - let messages_and_keys = genesis - .attesters + let messages_and_keys = attesters .keys() .enumerate() .filter(|(i, _)| self.signers.0[*i]) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index a86d890b..e00a3afe 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -141,11 +141,22 @@ fn test_batch_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.attesters = Committee::new(setup1.genesis.attesters.iter().take(3).cloned()).unwrap(); + genesis3.attesters = Committee::new( + setup1 + .genesis + .attesters + .as_ref() + .unwrap() + .iter() + .take(3) + .cloned(), + ) + .unwrap() + .into(); let genesis3 = genesis3.with_hash(); for i in 0..setup1.attester_keys.len() + 1 { - let mut qc = BatchQC::new(make_batch_msg(rng), &setup1.genesis); + let mut qc = BatchQC::new(make_batch_msg(rng), &setup1.genesis).unwrap(); for key in &setup1.attester_keys[0..i] { qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis) .unwrap(); @@ -154,11 +165,13 @@ fn test_batch_qc() { let expected_weight: u64 = setup1 .genesis .attesters + .as_ref() + .unwrap() .iter() .take(i) .map(|w| w.weight) .sum(); - 
if expected_weight >= setup1.genesis.attesters.threshold() { + if expected_weight >= setup1.genesis.attesters.as_ref().unwrap().threshold() { assert!(qc.verify(&setup1.genesis).is_ok()); } else { assert_matches!( @@ -184,11 +197,19 @@ fn test_attester_committee_weights() { let sums = [1000, 1600, 2400, 8400, 9300, 10000]; let msg = make_batch_msg(rng); - let mut qc = BatchQC::new(msg.clone(), &setup.genesis); + let mut qc = BatchQC::new(msg.clone(), &setup.genesis).unwrap(); for (n, weight) in sums.iter().enumerate() { let key = &setup.attester_keys[n]; qc.add(&key.sign_msg(msg.clone()), &setup.genesis).unwrap(); - assert_eq!(setup.genesis.attesters.weight(&qc.signers), *weight); + assert_eq!( + setup + .genesis + .attesters + .as_ref() + .unwrap() + .weight(&qc.signers), + *weight + ); } } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 2a592cc4..58dcd603 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -40,7 +40,7 @@ impl ProtoFmt for GenesisRaw { protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), validators: Committee::new(validators.into_iter()).context("validators_v1")?, - attesters: attester::Committee::new(attesters.into_iter()).context("attesters")?, + attesters: Some(attester::Committee::new(attesters.into_iter()).context("attesters")?), leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) } @@ -52,7 +52,11 @@ impl ProtoFmt for GenesisRaw { protocol_version: Some(self.protocol_version.0), validators_v1: self.validators.iter().map(|v| v.build()).collect(), - attesters: self.attesters.iter().map(|v| v.build()).collect(), + attesters: self + .attesters + .as_ref() + .map(|a| a.iter().map(|v| v.build()).collect()) + .unwrap_or_default(), leader_selection: Some(self.leader_selection.build()), } } diff --git a/node/libs/roles/src/validator/messages/consensus.rs 
b/node/libs/roles/src/validator/messages/consensus.rs index ffd3829d..882bdcee 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -245,7 +245,7 @@ pub struct GenesisRaw { /// Set of validators of the chain. pub validators: Committee, /// Set of attesters of the chain. - pub attesters: attester::Committee, + pub attesters: Option, /// The mode used for selecting leader for a given view. pub leader_selection: LeaderSelectionMode, } diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index 7b35c61d..32ac4d98 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -142,7 +142,7 @@ mod version1 { use super::*; /// Hardcoded genesis. - fn genesis() -> Genesis { + fn genesis_empty_attesters() -> Genesis { GenesisRaw { chain_id: ChainId(1337), fork_number: ForkNumber(402598740274745173), @@ -150,7 +150,22 @@ mod version1 { protocol_version: ProtocolVersion(1), validators: validator_committee(), - attesters: attester_committee(), + attesters: None, + leader_selection: LeaderSelectionMode::Weighted, + } + .with_hash() + } + + /// Hardcoded genesis. + fn genesis_with_attesters() -> Genesis { + GenesisRaw { + chain_id: ChainId(1337), + fork_number: ForkNumber(402598740274745173), + first_block: BlockNumber(8902834932452), + + protocol_version: ProtocolVersion(1), + validators: validator_committee(), + attesters: attester_committee().into(), leader_selection: LeaderSelectionMode::Weighted, } .with_hash() @@ -167,7 +182,21 @@ mod version1 { ) .decode() .unwrap(); - assert_eq!(want, genesis().hash()); + assert_eq!(want, genesis_empty_attesters().hash()); + } + + /// Note that genesis is NOT versioned by ProtocolVersion. + /// Even if it was, ALL versions of genesis need to be supported FOREVER, + /// unless we introduce dynamic regenesis. 
+ /// FIXME: This fails with the new attester committee. + #[test] + fn genesis_hash_change_detector_2() { + let want: GenesisHash = Text::new( + "genesis_hash:keccak256:63d6562ea2a27069e64a4005d1aef446907db945d85e06323296d2c0f8336c65", + ) + .decode() + .unwrap(); + assert_eq!(want, genesis_with_attesters().hash()); } #[test] @@ -197,7 +226,7 @@ mod version1 { /// Hardcoded view. fn view() -> View { View { - genesis: genesis().hash(), + genesis: genesis_empty_attesters().hash(), number: ViewNumber(9136573498460759103), } } @@ -220,7 +249,7 @@ mod version1 { /// Hardcoded `CommitQC`. fn commit_qc() -> CommitQC { - let genesis = genesis(); + let genesis = genesis_empty_attesters(); let replica_commit = replica_commit(); let mut x = CommitQC::new(replica_commit.clone(), &genesis); for k in validator_keys() { @@ -249,7 +278,7 @@ mod version1 { /// Hardcoded `PrepareQC`. fn prepare_qc() -> PrepareQC { let mut x = PrepareQC::new(view()); - let genesis = genesis(); + let genesis = genesis_empty_attesters(); let replica_prepare = replica_prepare(); for k in validator_keys() { x.add(&k.sign_msg(replica_prepare.clone()), &genesis) diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 2eb8e44d..df7c09c6 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -165,7 +165,8 @@ impl From for Setup { weight: *w, } })) - .unwrap(), + .unwrap() + .into(), leader_selection: spec.leader_selection, } .with_hash(), diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index 337d0f1a..7e9d571d 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -226,6 +226,8 @@ fn test_commit_qc() { let expected_weight: u64 = setup1 .genesis .attesters + .as_ref() + .unwrap() .iter() .take(i) .map(|w| w.weight) @@ -327,6 +329,8 @@ fn test_prepare_qc() { let expected_weight: u64 = setup1 .genesis .attesters + .as_ref() 
+ .unwrap() .iter() .take(n) .map(|w| w.weight) .sum(); From 13b63d96473d9a25ab98623d8bfe826e97403f19 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 15 May 2024 16:00:55 -0300 Subject: [PATCH 69/79] Optional build and read protobuf for attester committee --- node/libs/roles/src/proto/validator.proto | 2 +- node/libs/roles/src/validator/conv.rs | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 922c405e..87600b30 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -16,7 +16,7 @@ message Genesis { // We will either remove them entirely, or keep them for the initial epoch. optional uint32 protocol_version = 8; // required; ProtocolVersion repeated WeightedValidator validators_v1 = 3; - repeated attester.WeightedAttester attesters = 9; + repeated attester.WeightedAttester attesters = 9; // optional optional LeaderSelectionMode leader_selection = 4; // required } diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 58dcd603..05c34bb9 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -40,7 +40,8 @@ impl ProtoFmt for GenesisRaw { protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), validators: Committee::new(validators.into_iter()).context("validators_v1")?, - attesters: Some(attester::Committee::new(attesters.into_iter()).context("attesters")?), + attesters: (attesters.len() > 0) + .then_some(attester::Committee::new(attesters.into_iter()).context("attesters")?), leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) } @@ -55,7 +56,7 @@ impl ProtoFmt for GenesisRaw { attesters: self .attesters .as_ref() - .map(|a| a.iter().map(|v| v.build()).collect()) + .map(|c| c.iter().map(|v| v.build()).collect()) .unwrap_or_default(), leader_selection: 
Some(self.leader_selection.build()), } From d521f1000b9c2eab85645c5fdab088a1a3fc551a Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 15 May 2024 16:04:40 -0300 Subject: [PATCH 70/79] Fix clippy lint --- node/libs/roles/src/validator/conv.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 05c34bb9..839d800f 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -40,7 +40,7 @@ impl ProtoFmt for GenesisRaw { protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), validators: Committee::new(validators.into_iter()).context("validators_v1")?, - attesters: (attesters.len() > 0) + attesters: (!attesters.is_empty()) .then_some(attester::Committee::new(attesters.into_iter()).context("attesters")?), leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) From 520e41250755a7b7388d19aee6814574fb6df549 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 15 May 2024 16:35:26 -0300 Subject: [PATCH 71/79] Delete attester key from the config --- node/tools/src/bin/deployer.rs | 2 -- node/tools/src/bin/localnet_config.rs | 2 -- node/tools/src/config.rs | 7 +------ node/tools/src/proto/mod.proto | 3 --- node/tools/src/tests.rs | 1 - 5 files changed, 1 insertion(+), 14 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 634481d5..609f2628 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -30,7 +30,6 @@ fn generate_consensus_nodes(nodes: usize, seed_nodes_amount: Option) -> V let setup = validator::testonly::Setup::new(rng, nodes); let validator_keys = setup.validator_keys.clone(); - let attester_keys = setup.attester_keys.clone(); // Each node will have `gossip_peers` outbound peers. 
let peers = 2; @@ -48,7 +47,6 @@ fn generate_consensus_nodes(nodes: usize, seed_nodes_amount: Option) -> V genesis: setup.genesis.clone(), max_payload_size: 1000000, validator_key: Some(validator_keys[i].clone()), - attester_key: Some(attester_keys[i].clone()), node_key: node_keys[i].clone(), gossip_dynamic_inbound_limit: 2, gossip_static_inbound: [].into(), diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 3c5c2a39..18d3c00e 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -59,7 +59,6 @@ fn main() -> anyhow::Result<()> { let setup = validator::testonly::Setup::new(rng, validator_count); let validator_keys = setup.validator_keys.clone(); - let attester_keys = setup.attester_keys.clone(); // Each node will have `gossip_peers` outbound peers. let nodes = addrs.len(); @@ -78,7 +77,6 @@ fn main() -> anyhow::Result<()> { max_payload_size: 1000000, node_key: node_keys[i].clone(), validator_key: validator_keys.get(i).cloned(), - attester_key: attester_keys.get(i).cloned(), gossip_dynamic_inbound_limit: peers, gossip_static_inbound: HashSet::default(), gossip_static_outbound: HashMap::default(), diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 0f950ef4..23878391 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -11,7 +11,7 @@ use zksync_concurrency::{ctx, net}; use zksync_consensus_bft as bft; use zksync_consensus_crypto::{read_optional_text, read_required_text, Text, TextFmt}; use zksync_consensus_executor as executor; -use zksync_consensus_roles::{attester, node, validator}; +use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::{BlockStore, BlockStoreRunner}; use zksync_protobuf::{read_required, required, ProtoFmt}; @@ -93,7 +93,6 @@ pub struct AppConfig { pub genesis: validator::Genesis, pub max_payload_size: usize, pub validator_key: Option, - pub attester_key: Option, pub node_key: node::SecretKey, pub 
gossip_dynamic_inbound_limit: usize, @@ -134,9 +133,6 @@ impl ProtoFmt for AppConfig { // TODO: read secret. validator_key: read_optional_secret_text(&r.validator_secret_key) .context("validator_secret_key")?, - attester_key: read_optional_secret_text(&r.attester_secret_key) - .context("attester_secret_key")?, - node_key: read_required_secret_text(&r.node_secret_key).context("node_secret_key")?, gossip_dynamic_inbound_limit: required(&r.gossip_dynamic_inbound_limit) .and_then(|x| Ok((*x).try_into()?)) @@ -156,7 +152,6 @@ impl ProtoFmt for AppConfig { genesis: Some(self.genesis.build()), max_payload_size: Some(self.max_payload_size.try_into().unwrap()), validator_secret_key: self.validator_key.as_ref().map(TextFmt::encode), - attester_secret_key: self.attester_key.as_ref().map(TextFmt::encode), node_secret_key: Some(self.node_key.encode()), gossip_dynamic_inbound_limit: Some( diff --git a/node/tools/src/proto/mod.proto b/node/tools/src/proto/mod.proto index 85c7fc74..449d144f 100644 --- a/node/tools/src/proto/mod.proto +++ b/node/tools/src/proto/mod.proto @@ -79,9 +79,6 @@ message AppConfig { // Validator secret key. optional string validator_secret_key = 10; // optional; ValidatorSecretKey - // Validator secret key. - optional string attester_secret_key = 12; // optional; AttesterSecretKey - // Gossip network // Node secret key. 
diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index 2f35d017..79f6a56c 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -25,7 +25,6 @@ impl Distribution for EncodeDist { genesis: genesis.with_hash(), max_payload_size: rng.gen(), validator_key: self.sample_opt(|| rng.gen()), - attester_key: self.sample_opt(|| rng.gen()), node_key: rng.gen(), gossip_dynamic_inbound_limit: rng.gen(), From ef5c654ed22a711dabe15b1d277578cd22b56a4a Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 16 May 2024 11:50:01 -0300 Subject: [PATCH 72/79] Generate valid genesis to verify in test --- node/libs/roles/src/validator/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index 7e9d571d..60695436 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -95,11 +95,11 @@ fn test_schema_encoding() { } #[test] -fn test_genesis_schema_decode() { +fn test_genesis_verify() { let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let genesis = rng.gen::(); + let genesis = Setup::new(rng, 1).genesis.clone(); assert!(genesis.verify().is_ok()); assert!(Genesis::read(&genesis.build()).is_ok()); From 034f196f0cd4e88d39392f9d7dda1e9ba323df04 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 16 May 2024 12:01:32 -0300 Subject: [PATCH 73/79] Fix clippy lints with latest rust version --- node/actors/network/src/mux/transient_stream.rs | 1 + node/libs/concurrency/src/ctx/mod.rs | 5 +---- node/libs/concurrency/src/scope/mod.rs | 4 ++-- node/libs/protobuf/src/std_conv.rs | 2 +- node/tools/src/config.rs | 4 ++-- 5 files changed, 7 insertions(+), 9 deletions(-) diff --git a/node/actors/network/src/mux/transient_stream.rs b/node/actors/network/src/mux/transient_stream.rs index e4736f69..25dd2818 100644 --- a/node/actors/network/src/mux/transient_stream.rs +++ 
b/node/actors/network/src/mux/transient_stream.rs @@ -20,6 +20,7 @@ pub(crate) struct ReadStream(pub(super) sync::ExclusiveLock) pub(crate) struct WriteStream(pub(super) sync::ExclusiveLock); /// Error returned by `ReadStream::read` in case the stream has been closed by peer. +#[allow(dead_code)] #[derive(Debug, thiserror::Error)] #[error("end of stream")] pub(crate) struct EndOfStream; diff --git a/node/libs/concurrency/src/ctx/mod.rs b/node/libs/concurrency/src/ctx/mod.rs index 6af9af06..e716b84e 100644 --- a/node/libs/concurrency/src/ctx/mod.rs +++ b/node/libs/concurrency/src/ctx/mod.rs @@ -267,10 +267,7 @@ pub enum Error { pub type Result = std::result::Result; impl crate::error::Wrap for Error { - fn with_wrap C>( - self, - f: F, - ) -> Self { + fn with_wrap C>(self, f: F) -> Self { match self { Error::Internal(err) => Error::Internal(err.context(f())), err => err, diff --git a/node/libs/concurrency/src/scope/mod.rs b/node/libs/concurrency/src/scope/mod.rs index 40f7acd5..e9fd9404 100644 --- a/node/libs/concurrency/src/scope/mod.rs +++ b/node/libs/concurrency/src/scope/mod.rs @@ -315,10 +315,10 @@ impl<'env, E: 'static + Send> Scope<'env, E> { /// task (in particular, not from async code). /// Behaves analogically to `run`. 
#[doc(hidden)] - pub fn run_blocking(&'env mut self, root_task: F) -> Result + pub fn run_blocking(&'env mut self, root_task: F) -> Result where E: 'static + Send, - F: 'env + FnOnce(&'env ctx::Ctx, &'env Self) -> Result, + F: 'env + Send + FnOnce(&'env ctx::Ctx, &'env Self) -> Result, { let guard = Arc::new(State::make(self.ctx.clone())); self.cancel_guard = Arc::downgrade(&guard); diff --git a/node/libs/protobuf/src/std_conv.rs b/node/libs/protobuf/src/std_conv.rs index 76ac4137..b4240dbb 100644 --- a/node/libs/protobuf/src/std_conv.rs +++ b/node/libs/protobuf/src/std_conv.rs @@ -14,7 +14,7 @@ impl ProtoFmt for () { } } -impl ProtoFmt for std::net::SocketAddr { +impl ProtoFmt for net::SocketAddr { type Proto = proto::std::SocketAddr; fn read(r: &Self::Proto) -> anyhow::Result { diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 23878391..cd199a2c 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -1,7 +1,7 @@ //! Node configuration. use crate::{proto, store}; use anyhow::Context as _; -use serde_json::{ser::Formatter, Serializer}; +use serde_json::ser::Formatter; use std::{ collections::{HashMap, HashSet}, net::SocketAddr, @@ -51,7 +51,7 @@ pub fn encode_json(x: &T) -> String { /// Encodes a generated proto message for arbitrary ProtoFmt with provided serializer. 
pub(crate) fn encode_with_serializer( x: &T, - mut serializer: Serializer, F>, + mut serializer: serde_json::Serializer, F>, ) -> String { T::serialize(x, &mut serializer).unwrap(); String::from_utf8(serializer.into_inner()).unwrap() From 77e45d65d2bb9e5ff6a422b3462f5fd50990bd25 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 16 May 2024 13:10:19 -0300 Subject: [PATCH 74/79] Fix genesis raw encoding for empty attesters --- node/libs/roles/src/validator/conv.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 839d800f..0cef59da 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -40,8 +40,11 @@ impl ProtoFmt for GenesisRaw { protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), validators: Committee::new(validators.into_iter()).context("validators_v1")?, - attesters: (!attesters.is_empty()) - .then_some(attester::Committee::new(attesters.into_iter()).context("attesters")?), + attesters: if attesters.is_empty() { + None + } else { + Some(attester::Committee::new(attesters.into_iter()).context("attesters")?) 
+ }, leader_selection: read_required(&r.leader_selection).context("leader_selection")?, }) } From 834f0fbd72284727663811d40de872ebf9d6d1f6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 16 May 2024 16:49:46 -0300 Subject: [PATCH 75/79] Fix latest comments --- node/actors/network/src/gossip/mod.rs | 29 +++------- node/actors/network/src/gossip/runner.rs | 55 +++++++++++-------- .../network/src/mux/transient_stream.rs | 6 -- .../libs/roles/src/attester/messages/batch.rs | 18 ++++-- node/libs/roles/src/attester/tests.rs | 26 ++------- node/libs/roles/src/validator/tests.rs | 8 +-- 6 files changed, 61 insertions(+), 81 deletions(-) diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index b40b2f8c..7242f4a0 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -123,6 +123,7 @@ impl Network { pub(crate) async fn update_batch_qc(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { // TODO This is not a good way to do this, we shouldn't be verifying the QC every time // Can we get only the latest votes? + let attesters = self.genesis().attesters.as_ref().context("attesters")?; loop { let mut sub = self.batch_votes.subscribe(); let votes = sync::changed(ctx, &mut sub) @@ -169,27 +170,15 @@ impl Network { } } - let weight = self - .genesis() - .attesters - .as_ref() - .context("attesters")? - .weight( - &self - .batch_qc - .get(&new_qc.message.number) - .context("last qc")? - .signers, - ); + let weight = attesters.weight( + &self + .batch_qc + .get(&new_qc.message.number) + .context("last qc")? + .signers, + ); - if weight - < self - .genesis() - .attesters - .as_ref() - .context("attesters")? 
- .threshold() - { + if weight < attesters.threshold() { return Ok(()); }; diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index ede9c0e9..0e3b4abd 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -113,9 +113,13 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); - let push_signature_client = - rpc::Client::::new(ctx, self.cfg.rpc.push_batch_votes_rate); - let push_signature_server = PushBatchVotesServer(self); + let mut push_signature_client: Option> = None; + let mut push_signature_server: Option = None; + if self.genesis().attesters.is_some() { + push_signature_client = + rpc::Client::new(ctx, self.cfg.rpc.push_batch_votes_rate).into(); + push_signature_server = PushBatchVotesServer(self).into(); + }; let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); @@ -127,12 +131,6 @@ impl Network { push_validator_addrs_server, self.cfg.rpc.push_validator_addrs_rate, ) - .add_client(&push_signature_client) - .add_server( - ctx, - push_signature_server, - self.cfg.rpc.push_batch_votes_rate, - ) .add_client(&push_block_store_state_client) .add_server( ctx, @@ -142,6 +140,13 @@ impl Network { .add_client(&get_block_client) .add_server(ctx, &*self.block_store, self.cfg.rpc.get_block_rate) .add_server(ctx, rpc::ping::Server, rpc::ping::RATE); + if let (Some(client), Some(server)) = (&push_signature_client, push_signature_server) { + service = service.add_client(client).add_server( + ctx, + server, + self.cfg.rpc.push_batch_votes_rate, + ); + } if let Some(ping_timeout) = &self.cfg.ping_timeout { let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); @@ -182,22 +187,24 @@ impl Network { } }); - // Push L1 batch votes updates to peer. 
- s.spawn::<()>(async { - let mut old = BatchVotes::default(); - let mut sub = self.batch_votes.subscribe(); - sub.mark_changed(); - loop { - let new = sync::changed(ctx, &mut sub).await?.clone(); - let diff = new.get_newer(&old); - if diff.is_empty() { - continue; + if let Some(client) = &push_signature_client { + // Push L1 batch votes updates to peer. + s.spawn::<()>(async { + let mut old = BatchVotes::default(); + let mut sub = self.batch_votes.subscribe(); + sub.mark_changed(); + loop { + let new = sync::changed(ctx, &mut sub).await?.clone(); + let diff = new.get_newer(&old); + if diff.is_empty() { + continue; + } + old = new; + let req = rpc::push_batch_votes::Req(diff); + client.call(ctx, &req, kB).await?; } - old = new; - let req = rpc::push_batch_votes::Req(diff); - push_signature_client.call(ctx, &req, kB).await?; - } - }); + }); + }; // Perform get_block calls to peer. s.spawn::<()>(async { diff --git a/node/actors/network/src/mux/transient_stream.rs b/node/actors/network/src/mux/transient_stream.rs index 25dd2818..17518cdd 100644 --- a/node/actors/network/src/mux/transient_stream.rs +++ b/node/actors/network/src/mux/transient_stream.rs @@ -19,12 +19,6 @@ pub(crate) struct ReadStream(pub(super) sync::ExclusiveLock) #[derive(Debug)] pub(crate) struct WriteStream(pub(super) sync::ExclusiveLock); -/// Error returned by `ReadStream::read` in case the stream has been closed by peer. -#[allow(dead_code)] -#[derive(Debug, thiserror::Error)] -#[error("end of stream")] -pub(crate) struct EndOfStream; - impl ReadStream { /// Reads until buf is full, or end of stream is reached. pub(crate) async fn read_exact( diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index bc1a02fa..d0f7cc57 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -10,7 +10,7 @@ pub struct BatchNumber(pub u64); impl BatchNumber { /// Increment the batch number. 
pub fn next(&self) -> BatchNumber { - BatchNumber(self.0 + 1) + BatchNumber(self.0.checked_add(1).unwrap()) } } @@ -52,6 +52,9 @@ pub enum BatchQCVerifyError { /// Bad signer set. #[error("signers set doesn't match genesis")] BadSignersSet, + /// No attester committee in genesis. + #[error("No attester committee in genesis")] + AttestersNotInGenesis, } /// Error returned by `BatchQC::add()` if the signature is invalid. @@ -76,7 +79,13 @@ impl BatchQC { pub fn new(message: Batch, genesis: &Genesis) -> anyhow::Result { Ok(Self { message, - signers: Signers::new(genesis.attesters.as_ref().context("attesters")?.len()), + signers: Signers::new( + genesis + .attesters + .as_ref() + .context("no attester committee in genesis")? + .len(), + ), signature: attester::AggregateSignature::default(), }) } @@ -89,7 +98,7 @@ impl BatchQC { let i = genesis .attesters .as_ref() - .expect("attesters set is empty in genesis") // This case should never happen + .context("no attester committee in genesis")? .index(&msg.key) .ok_or(Error::SignerNotInCommittee { signer: Box::new(msg.key.clone()), @@ -106,7 +115,8 @@ impl BatchQC { let attesters = genesis .attesters .as_ref() - .expect("attesters set is empty in genesis"); // This case should never happen + .context("no attester committee in genesis") + .map_err(|_e| Error::AttestersNotInGenesis)?; if self.signers.len() != attesters.len() { return Err(Error::BadSignersSet); } diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index e00a3afe..dc3b8d8c 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -141,19 +141,11 @@ fn test_batch_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.attesters = Committee::new( - setup1 - .genesis - .attesters - .as_ref() - .unwrap() - .iter() - .take(3) - .cloned(), - ) - .unwrap() - .into(); + genesis3.attesters = 
Committee::new(setup1.genesis.attesters.as_ref().unwrap().get(3).cloned()) + .unwrap() + .into(); let genesis3 = genesis3.with_hash(); + let attesters = setup1.genesis.attesters.as_ref().unwrap(); for i in 0..setup1.attester_keys.len() + 1 { let mut qc = BatchQC::new(make_batch_msg(rng), &setup1.genesis).unwrap(); @@ -162,15 +154,7 @@ fn test_batch_qc() { .unwrap(); } - let expected_weight: u64 = setup1 - .genesis - .attesters - .as_ref() - .unwrap() - .iter() - .take(i) - .map(|w| w.weight) - .sum(); + let expected_weight: u64 = attesters.iter().take(i).map(|w| w.weight).sum(); if expected_weight >= setup1.genesis.attesters.as_ref().unwrap().threshold() { assert!(qc.verify(&setup1.genesis).is_ok()); } else { diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index 60695436..2f0136f0 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -225,9 +225,7 @@ fn test_commit_qc() { } let expected_weight: u64 = setup1 .genesis - .attesters - .as_ref() - .unwrap() + .validators .iter() .take(i) .map(|w| w.weight) @@ -328,9 +326,7 @@ fn test_prepare_qc() { } let expected_weight: u64 = setup1 .genesis - .attesters - .as_ref() - .unwrap() + .validators .iter() .take(n) .map(|w| w.weight) From 5f352665542f8db9544578c5b1650317496d696c Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 16 May 2024 16:55:46 -0300 Subject: [PATCH 76/79] Get attester at the beginning of the tests --- node/libs/roles/src/attester/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index dc3b8d8c..208d4ea7 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -155,7 +155,7 @@ fn test_batch_qc() { } let expected_weight: u64 = attesters.iter().take(i).map(|w| w.weight).sum(); - if expected_weight >= setup1.genesis.attesters.as_ref().unwrap().threshold() { + if 
expected_weight >= attesters.threshold() { assert!(qc.verify(&setup1.genesis).is_ok()); } else { assert_matches!( From d2c82335f36d82ad8d3bd96318ab34241d11636d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 17 May 2024 13:46:41 -0300 Subject: [PATCH 77/79] Fix some last comments --- node/libs/roles/src/attester/messages/batch.rs | 3 +-- node/libs/roles/src/attester/tests.rs | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index d0f7cc57..b6af94d6 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -115,8 +115,7 @@ impl BatchQC { let attesters = genesis .attesters .as_ref() - .context("no attester committee in genesis") - .map_err(|_e| Error::AttestersNotInGenesis)?; + .ok_or(Error::AttestersNotInGenesis)?; if self.signers.len() != attesters.len() { return Err(Error::BadSignersSet); } diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index 208d4ea7..f29ed464 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -141,9 +141,18 @@ fn test_batch_qc() { let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let mut genesis3 = (*setup1.genesis).clone(); - genesis3.attesters = Committee::new(setup1.genesis.attesters.as_ref().unwrap().get(3).cloned()) - .unwrap() - .into(); + genesis3.attesters = Committee::new( + setup1 + .genesis + .attesters + .as_ref() + .unwrap() + .iter() + .take(3) + .cloned(), + ) + .unwrap() + .into(); let genesis3 = genesis3.with_hash(); let attesters = setup1.genesis.attesters.as_ref().unwrap(); From 672046fd2f4fe863709875678efba2ed6a0d068a Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 17 May 2024 13:46:49 -0300 Subject: [PATCH 78/79] Fix CI protobuff job --- .github/workflows/protobuf.yaml | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 
deletions(-) diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml index 439a30b9..128764b4 100644 --- a/.github/workflows/protobuf.yaml +++ b/.github/workflows/protobuf.yaml @@ -2,7 +2,7 @@ name: protobuf_compatibility on: pull_request: - branches: [ "*" ] + branches: ["*"] push: # protobuf compatibility is a transitive property, # but it requires all the transitions to be checked. @@ -11,7 +11,7 @@ on: # (unless we improve our github setup). # Therefore on post-merge we will execute the # compatibility check as well (TODO: alerting). - branches: [ "main" ] + branches: ["main"] permissions: id-token: write @@ -20,7 +20,7 @@ permissions: env: CARGO_TERM_COLOR: always CARGO_INCREMENTAL: "0" - RUSTFLAGS: "-Dwarnings -C linker=clang -C link-arg=-fuse-ld=lld -C link-arg=-Wl,-z,nostart-stop-gc" + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=lld -C link-arg=-Wl,-z,nostart-stop-gc" RUSTC_WRAPPER: "sccache" SCCACHE_GHA_ENABLED: "true" RUST_BACKTRACE: "1" @@ -33,8 +33,8 @@ jobs: compatibility: runs-on: [ubuntu-22.04-github-hosted-16core] steps: - - uses: mozilla-actions/sccache-action@v0.0.3 - + - uses: mozilla-actions/sccache-action@v0.0.3 + # before - uses: actions/checkout@v4 with: @@ -42,8 +42,7 @@ jobs: path: before fetch-depth: 0 # fetches all branches and tags, which is needed to compute the LCA. 
- name: checkout LCA - run: - git checkout $(git merge-base $BASE $HEAD) + run: git checkout $(git merge-base $BASE $HEAD) working-directory: ./before - name: compile before run: cargo build --all-targets @@ -53,7 +52,7 @@ jobs: perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/' `find ./before/node/target/debug/build/*/output` | xargs cat > ./before.binpb - + # after - uses: actions/checkout@v4 with: @@ -67,7 +66,7 @@ jobs: perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/' `find ./after/node/target/debug/build/*/output` | xargs cat > ./after.binpb - + # compare - uses: bufbuild/buf-setup-action@v1 with: From d84421c6f5c437489ffb93b94b0eacfd8f3a317e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 20 May 2024 11:29:10 -0300 Subject: [PATCH 79/79] Fix last comment in gossip runner --- node/actors/network/src/gossip/runner.rs | 54 +++++++++++------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 0e3b4abd..af727905 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -113,13 +113,6 @@ impl Network { ctx, self.cfg.rpc.push_block_store_state_rate, ); - let mut push_signature_client: Option> = None; - let mut push_signature_server: Option = None; - if self.genesis().attesters.is_some() { - push_signature_client = - rpc::Client::new(ctx, self.cfg.rpc.push_batch_votes_rate).into(); - push_signature_server = PushBatchVotesServer(self).into(); - }; let push_block_store_state_server = PushBlockStoreStateServer::new(self); let get_block_client = rpc::Client::::new(ctx, self.cfg.rpc.get_block_rate); @@ -140,12 +133,34 @@ impl Network { .add_client(&get_block_client) .add_server(ctx, &*self.block_store, self.cfg.rpc.get_block_rate) .add_server(ctx, rpc::ping::Server, rpc::ping::RATE); - if let (Some(client), Some(server)) = (&push_signature_client, push_signature_server) { - service = 
service.add_client(client).add_server( + if self.genesis().attesters.as_ref().is_some() { + let push_signature_client = rpc::Client::::new( ctx, - server, self.cfg.rpc.push_batch_votes_rate, ); + let push_signature_server = PushBatchVotesServer(self); + service = service.add_client(&push_signature_client).add_server( + ctx, + push_signature_server, + self.cfg.rpc.push_batch_votes_rate, + ); + // Push L1 batch votes updates to peer. + s.spawn::<()>(async { + let push_signature_client = push_signature_client; + let mut old = BatchVotes::default(); + let mut sub = self.batch_votes.subscribe(); + sub.mark_changed(); + loop { + let new = sync::changed(ctx, &mut sub).await?.clone(); + let diff = new.get_newer(&old); + if diff.is_empty() { + continue; + } + old = new; + let req = rpc::push_batch_votes::Req(diff); + push_signature_client.call(ctx, &req, kB).await?; + } + }); } if let Some(ping_timeout) = &self.cfg.ping_timeout { @@ -187,25 +202,6 @@ impl Network { } }); - if let Some(client) = &push_signature_client { - // Push L1 batch votes updates to peer. - s.spawn::<()>(async { - let mut old = BatchVotes::default(); - let mut sub = self.batch_votes.subscribe(); - sub.mark_changed(); - loop { - let new = sync::changed(ctx, &mut sub).await?.clone(); - let diff = new.get_newer(&old); - if diff.is_empty() { - continue; - } - old = new; - let req = rpc::push_batch_votes::Req(diff); - client.call(ctx, &req, kB).await?; - } - }); - }; - // Perform get_block calls to peer. s.spawn::<()>(async { let state = &mut push_block_store_state_server.state.subscribe();