Skip to content

Commit

Permalink
Merge branch 'main' into weighted_validators
Browse files Browse the repository at this point in the history
  • Loading branch information
ElFantasma committed Mar 19, 2024
2 parents e6425e4 + 67b8c30 commit d9c3826
Show file tree
Hide file tree
Showing 47 changed files with 506 additions and 506 deletions.
8 changes: 7 additions & 1 deletion .github/CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,13 @@
# Contribution Guidelines

Hello! Thanks for your interest in joining the mission to accelerate the mass adoption of crypto for personal
sovereignty! We welcome contributions from anyone on the internet, and are grateful for even the smallest of fixes!
sovereignty! We welcome contributions from anyone on the internet.

Note, however, that all the contributions are subject to review, and not every contribution is guaranteed to be merged.
It is highly advised to reach out to developers (for example, by creating an issue) before preparing a significant
change in the codebase, and explicitly confirm that this contribution will be considered for merge. Otherwise, it is
possible to discover that a feature you have spent some time on does not align with the core team vision or capacity to
maintain the high quality of a given submission in the long term.

## Ways to contribute

Expand Down
47 changes: 8 additions & 39 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
.PHONY: node nodes_config docker_nodes_config node_docker consensus_docker_example clean clean_docker addresses_file blank_configs
NODE?=0
DOCKER_IP=172.12.0.10
IP?=127.0.0.1:3054
EXECUTABLE_NODE_DIR=node/tools
NODES=4
SEED_NODES=1

# Locally run commands

node:
export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/nodes-config/node_${NODE} && cargo run -- --database ../../database/node_${NODE}
export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/nodes-config/${IP} && cargo run -- --database ../../database/node_${NODE}

nodes_config:
cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir nodes-config
Expand All @@ -21,22 +20,7 @@ docker_build_executor:
docker_node_image:
docker build -t consensus-node --target=executor-runtime .

# Generate per-node configs for the docker setup from docker-config/addresses.txt.
docker_nodes_config:
cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config

# Build the node image, then start a single detached consensus node container
# named after ${NODE} (default 0, see NODE?=0 above).
docker_node:
$(MAKE) docker_node_image
docker run -d --name consensus-node-${NODE} --env NODE_ID="consensus-node-${NODE}" consensus-node

# End-to-end docker example: write a fresh two-node addresses.txt, generate
# the node configs, build the node image, and bring the nodes up detached.
consensus_docker_example:
mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
$(MAKE) docker_nodes_config
$(MAKE) docker_node_image
docker-compose up -d

# Stop the two example node containers (stops only; does not remove them).
stop_docker_nodes:
docker stop consensus-node-1 consensus-node-2
# Kubernetes commands

start_k8s_nodes:
cd ${EXECUTABLE_NODE_DIR} && cargo run --bin deployer generate-config --nodes ${NODES}
Expand All @@ -52,26 +36,11 @@ clean: clean_docker clean_k8s

clean_k8s:
rm -rf ${EXECUTABLE_NODE_DIR}/k8s_configs
kubectl delete deployments --all
kubectl delete pods --all
kubectl delete deployments --all -n consensus
kubectl delete pods --all -n consensus
kubectl delete services --all -n consensus
kubectl delete namespaces consensus

clean_docker:
docker rm -f consensus-node-1
docker rm -f consensus-node-2
docker network rm -f node-net
docker image rm -f consensus-node
docker image rm -f consensus-node:latest
docker image rm -f test-suite

# Recreate docker-config/addresses.txt with one 0.0.0.$n:3054 entry per node,
# for n in 0..${NODES}-1.
addresses_file:
mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
cd ${EXECUTABLE_NODE_DIR}/docker-config && \
rm -rf addresses.txt && \
touch addresses.txt && \
for n in $$(seq 0 $$((${NODES} - 1))); do echo 0.0.0.$$n:3054 >> addresses.txt; done

# Produce "blank" node configs: generate the addresses file and the docker
# node configs, then rewrite each node's config.json with a wildcard public
# address and an empty static-outbound gossip list.
# Fix: the prerequisite was misspelled `docker_node_configs`; the target
# defined above (and listed in .PHONY) is `docker_nodes_config`, so make
# would fail with "No rule to make target".
blank_configs: addresses_file docker_nodes_config
for n in $$(seq 0 $$((${NODES} - 1))); do \
jq '.publicAddr = "0.0.0.0:3054"' node/tools/docker-config/nodes-config/node_$$n/config.json | \
jq '.gossipStaticOutbound = "[]"' > node/tools/docker-config/nodes-config/node_$$n/config.tmp && \
mv -f node/tools/docker-config/nodes-config/node_$$n/config.tmp node/tools/docker-config/nodes-config/node_$$n/config.json; \
done
34 changes: 0 additions & 34 deletions compose.yaml

This file was deleted.

2 changes: 0 additions & 2 deletions node/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,5 @@ wildcard_dependencies = "warn"
redundant_locals = "allow"
needless_pass_by_ref_mut = "allow"
box_default = "allow"
# remove once fix to https://github.com/rust-lang/rust-clippy/issues/11764 is available on CI.
map_identity = "allow"
# &*x is not equivalent to x, because it affects borrowing in closures.
borrow_deref_ref = "allow"
7 changes: 3 additions & 4 deletions node/actors/bft/src/leader/state_machine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -167,9 +167,9 @@ impl StateMachine {
// The previous block was finalized, so we can propose a new block.
_ => {
let fork = &cfg.genesis().fork;
let (parent, number) = match high_qc {
Some(qc) => (Some(qc.header().hash()), qc.header().number.next()),
None => (fork.first_parent, fork.first_block),
let number = match high_qc {
Some(qc) => qc.header().number.next(),
None => fork.first_block,
};
// Defensively assume that PayloadManager cannot propose until the previous block is stored.
if let Some(prev) = number.prev() {
Expand All @@ -189,7 +189,6 @@ impl StateMachine {
.observe(payload.0.len());
let proposal = validator::BlockHeader {
number,
parent,
payload: payload.hash(),
};
(proposal, Some(payload))
Expand Down
7 changes: 0 additions & 7 deletions node/actors/bft/src/leader/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,6 @@ async fn replica_prepare_sanity_yield_leader_prepare() {
.unwrap()
.unwrap();
assert_eq!(leader_prepare.msg.view(), &replica_prepare.view);
assert_eq!(
leader_prepare.msg.proposal.parent,
replica_prepare
.high_vote
.as_ref()
.map(|v| v.proposal.hash()),
);
assert_eq!(
leader_prepare.msg.justification,
util.new_prepare_qc(|msg| *msg = replica_prepare)
Expand Down
2 changes: 1 addition & 1 deletion node/actors/bft/src/replica/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ impl StateMachine {
tracing::info!(
"Finalized block {}: {:#?}",
block.header().number,
block.header().hash(),
block.header().payload,
);
self.config
.block_store
Expand Down
13 changes: 11 additions & 2 deletions node/actors/bft/src/replica/leader_prepare.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,9 @@ pub(crate) enum Error {
/// Invalid message.
#[error("invalid message: {0:#}")]
InvalidMessage(#[source] validator::LeaderPrepareVerifyError),
/// Previous proposal was not finalized.
/// Leader proposed a block that was already pruned from replica's storage.
#[error("leader proposed a block that was already pruned from replica's storage")]
ProposalAlreadyPruned,
/// Oversized payload.
#[error("block proposal with an oversized payload (payload size: {payload_size})")]
ProposalOversizedPayload {
Expand Down Expand Up @@ -110,6 +111,14 @@ impl StateMachine {
});
}

// Replica MUSTN'T vote for blocks that have already been pruned from storage.
// (because it won't be able to persist and broadcast them once finalized).
// TODO(gprusak): it should never happen, we should add safety checks to prevent
// pruning blocks not known to be finalized.
if message.proposal.number < self.config.block_store.subscribe().borrow().first {
return Err(Error::ProposalAlreadyPruned);
}

// ----------- Checking the message --------------

signed_message.verify().map_err(Error::InvalidSignature)?;
Expand Down
64 changes: 31 additions & 33 deletions node/actors/bft/src/replica/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use assert_matches::assert_matches;
use rand::Rng;
use zksync_concurrency::{ctx, scope};
use zksync_consensus_roles::validator::{
self, CommitQC, Payload, PrepareQC, ReplicaCommit, ReplicaPrepare, ViewNumber,
self, CommitQC, Payload, PrepareQC, ReplicaCommit, ReplicaPrepare,
};

/// Sanity check of the happy path.
Expand Down Expand Up @@ -101,10 +101,6 @@ async fn leader_prepare_invalid_leader() {
let (mut util, runner) = UTHarness::new(ctx, 2).await;
s.spawn_bg(runner.run(ctx));

let view = ViewNumber(2);
util.set_view(view);
assert_eq!(util.view_leader(view), util.keys[0].public());

let replica_prepare = util.new_replica_prepare();
assert!(util
.process_replica_prepare(ctx, util.sign(replica_prepare.clone()))
Expand Down Expand Up @@ -167,6 +163,35 @@ async fn leader_prepare_old_view() {
.unwrap();
}

/// Tests that a leader proposal whose block number is below the replica's
/// first stored block is rejected with `ProposalAlreadyPruned`.
#[tokio::test]
async fn leader_prepare_pruned_block() {
zksync_concurrency::testonly::abort_on_panic();
let ctx = &ctx::test_root(&ctx::RealClock);
scope::run!(ctx, |ctx, s| async {
let (mut util, runner) = UTHarness::new(ctx, 1).await;
s.spawn_bg(runner.run(ctx));

let mut leader_prepare = util.new_leader_prepare(ctx).await;
// We assume default replica state and nontrivial `genesis.fork.first_block` here.
// Target the block number just below the first block kept in storage,
// i.e. a block that has already been pruned.
leader_prepare.proposal.number = util
.replica
.config
.block_store
.subscribe()
.borrow()
.first
.prev()
.unwrap();
let res = util
.process_leader_prepare(ctx, util.sign(leader_prepare))
.await;
// The pruned proposal must be rejected before any vote is cast.
assert_matches!(res, Err(leader_prepare::Error::ProposalAlreadyPruned));
Ok(())
})
.await
.unwrap();
}

/// Tests that `WriteBlockStore::verify_payload` is applied before signing a vote.
#[tokio::test]
async fn leader_prepare_invalid_payload() {
Expand Down Expand Up @@ -338,33 +363,6 @@ async fn leader_prepare_proposal_when_previous_not_finalized() {
.unwrap();
}

/// Tests that a leader proposal whose `proposal.parent` hash does not match
/// the high-QC block is rejected with a `BadParentHash` verification error.
#[tokio::test]
async fn leader_prepare_bad_parent_hash() {
zksync_concurrency::testonly::abort_on_panic();
let ctx = &ctx::test_root(&ctx::RealClock);
scope::run!(ctx, |ctx, s| async {
let (mut util, runner) = UTHarness::new(ctx, 1).await;
s.spawn_bg(runner.run(ctx));

tracing::info!("Produce initial block.");
util.produce_block(ctx).await;
tracing::info!("Make leader propose the next block.");
let mut leader_prepare = util.new_leader_prepare(ctx).await;
tracing::info!("Modify the proposal.parent so that it doesn't match the previous block");
// Overwrite the parent with a random hash that cannot match the high QC.
leader_prepare.proposal.parent = Some(ctx.rng().gen());
let res = util.process_leader_prepare(ctx, util.sign(leader_prepare.clone())).await;
// The error must carry both the expected parent (the high-QC block's hash)
// and the bogus one that was sent.
assert_matches!(res, Err(leader_prepare::Error::InvalidMessage(
validator::LeaderPrepareVerifyError::BadParentHash { got, want }
)) => {
assert_eq!(want, Some(leader_prepare.justification.high_qc().unwrap().message.proposal.hash()));
assert_eq!(got, leader_prepare.proposal.parent);
});
Ok(())
})
.await
.unwrap();
}

#[tokio::test]
async fn leader_prepare_bad_block_number() {
zksync_concurrency::testonly::abort_on_panic();
Expand Down Expand Up @@ -500,7 +498,7 @@ async fn leader_prepare_reproposal_invalid_block() {
.unwrap();
}

/// Check that replica provides expecte high_vote and high_qc after finalizing a block.
/// Check that replica provides expected high_vote and high_qc after finalizing a block.
#[tokio::test]
async fn leader_commit_sanity_yield_replica_prepare() {
zksync_concurrency::testonly::abort_on_panic();
Expand Down
8 changes: 3 additions & 5 deletions node/actors/bft/src/testonly/fuzz.rs
Original file line number Diff line number Diff line change
Expand Up @@ -174,11 +174,9 @@ impl Fuzz for validator::Payload {

impl Fuzz for validator::BlockHeader {
fn mutate(&mut self, rng: &mut impl Rng) {
match rng.gen_range(0..3) {
0 => self.parent = rng.gen(),
1 => self.number = rng.gen(),
2 => self.payload = rng.gen(),
_ => unreachable!(),
match rng.gen_range(0..2) {
0 => self.number = rng.gen(),
_ => self.payload = rng.gen(),
}
}
}
13 changes: 0 additions & 13 deletions node/actors/bft/src/testonly/ut_harness.rs
Original file line number Diff line number Diff line change
Expand Up @@ -137,19 +137,6 @@ impl UTHarness {
self.replica.view = view;
}

/// Sets both the replica's and the leader's current view to `view`.
pub(crate) fn set_view(&mut self, view: ViewNumber) {
self.set_replica_view(view);
self.set_leader_view(view);
}

/// Sets the leader state machine's current view to `view`.
pub(crate) fn set_leader_view(&mut self, view: ViewNumber) {
self.leader.view = view
}

/// Sets the replica state machine's current view to `view`.
pub(crate) fn set_replica_view(&mut self, view: ViewNumber) {
self.replica.view = view
}

pub(crate) fn replica_view(&self) -> validator::View {
validator::View {
protocol_version: self.protocol_version(),
Expand Down
Loading

0 comments on commit d9c3826

Please sign in to comment.