From f696992d77aa788fb228c4068a49f97868d8b531 Mon Sep 17 00:00:00 2001
From: Denis Kolegov
Date: Wed, 26 Jun 2024 15:56:30 +0200
Subject: [PATCH] Add initial specification in Quint (#135)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR adds an initial formal specification of the ChonkyBFT protocol in
Quint. It also contains tests and the results of the experiments we have
performed.

---------

Signed-off-by: Denis Kolegov
Co-authored-by: Igor Konnov
Co-authored-by: Bruno França
---
 .github/workflows/spec.yaml                   |   27 +
 .gitignore                                    |    3 +
 spec/Makefile                                 |   24 +
 spec/README.md                                |    3 +
 spec/informal-spec/README.md                  |   11 +
 spec/informal-spec/fetcher.rs                 |   15 +
 spec/informal-spec/proposer.rs                |   44 +
 spec/informal-spec/replica.rs                 |  256 +++
 spec/informal-spec/types.rs                   |  275 +++
 spec/protocol-spec/README.md                  |  170 ++
 spec/protocol-spec/defs.qnt                   |    9 +
 spec/protocol-spec/experiments/README.md      |   35 +
 .../experiments/all-invariants.csv            |   19 +
 spec/protocol-spec/experiments/apalache.json  |   11 +
 spec/protocol-spec/experiments/gen-inputs.sh  |   17 +
 spec/protocol-spec/experiments/n6f1b0.qnt     |   14 +
 .../n6f1b0_guided_no_proposing_leader.qnt     |   41 +
 spec/protocol-spec/experiments/n6f1b1.qnt     |   14 +
 .../experiments/n6f1b1_guided_dead_lock.qnt   |   45 +
 .../experiments/n6f1b1_guided_two_blocks.qnt  |   32 +
 spec/protocol-spec/experiments/n6f1b2.qnt     |   14 +
 .../experiments/n6f1b2_boxed.qnt              |  140 ++
 .../experiments/n6f1b2_guided_agreement.qnt   |   35 +
 spec/protocol-spec/experiments/n6f1b3.qnt     |   14 +
 spec/protocol-spec/experiments/n7f1b0.qnt     |   14 +
 .../n7f1b0_guided_three_blocks.qnt            |   66 +
 .../experiments/quint-verify-parallel.sh      |   36 +
 .../experiments/test-invariants.sh            |   30 +
 spec/protocol-spec/guided_replica.qnt         |  142 ++
 spec/protocol-spec/main.qnt                   |   15 +
 spec/protocol-spec/option.qnt                 |   46 +
 spec/protocol-spec/replica.qnt                | 1647 +++++++++++++++++
 spec/protocol-spec/state_monitor.qnt          |   58 +
 spec/protocol-spec/tests/tests_n6f1b0.qnt     |  379 ++++
 spec/protocol-spec/tests/tests_n6f1b1.qnt     |  360 ++++
 spec/protocol-spec/tests/tests_n6f1b2.qnt     |  243 +++
 spec/protocol-spec/tests/unit_tests.qnt       |  306 +++
 spec/protocol-spec/twins_n6f1b1.qnt           |   34 +
 spec/protocol-spec/twins_n6f1b2.qnt           |   34 +
 spec/protocol-spec/types.qnt                  |  184 ++
 40 files changed, 4862 insertions(+)
 create mode 100644 .github/workflows/spec.yaml
 create mode 100644 spec/Makefile
 create mode 100644 spec/README.md
 create mode 100644 spec/informal-spec/README.md
 create mode 100644 spec/informal-spec/fetcher.rs
 create mode 100644 spec/informal-spec/proposer.rs
 create mode 100644 spec/informal-spec/replica.rs
 create mode 100644 spec/informal-spec/types.rs
 create mode 100644 spec/protocol-spec/README.md
 create mode 100644 spec/protocol-spec/defs.qnt
 create mode 100644 spec/protocol-spec/experiments/README.md
 create mode 100644 spec/protocol-spec/experiments/all-invariants.csv
 create mode 100644 spec/protocol-spec/experiments/apalache.json
 create mode 100755 spec/protocol-spec/experiments/gen-inputs.sh
 create mode 100644 spec/protocol-spec/experiments/n6f1b0.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b0_guided_no_proposing_leader.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b1.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b1_guided_dead_lock.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b1_guided_two_blocks.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b2.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b2_boxed.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b2_guided_agreement.qnt
 create mode 100644 spec/protocol-spec/experiments/n6f1b3.qnt
 create mode 100644 spec/protocol-spec/experiments/n7f1b0.qnt
 create mode 100644 spec/protocol-spec/experiments/n7f1b0_guided_three_blocks.qnt
 create mode 100755 spec/protocol-spec/experiments/quint-verify-parallel.sh
 create mode 100755 spec/protocol-spec/experiments/test-invariants.sh
 create mode 100644 spec/protocol-spec/guided_replica.qnt
 create mode 100644 spec/protocol-spec/main.qnt
 create mode 100644 spec/protocol-spec/option.qnt
 create mode 100644 spec/protocol-spec/replica.qnt
 create mode 100644 spec/protocol-spec/state_monitor.qnt
 create mode 100644 spec/protocol-spec/tests/tests_n6f1b0.qnt
 create mode 100644 spec/protocol-spec/tests/tests_n6f1b1.qnt
 create mode 100644 spec/protocol-spec/tests/tests_n6f1b2.qnt
 create mode 100644 spec/protocol-spec/tests/unit_tests.qnt
 create mode 100644 spec/protocol-spec/twins_n6f1b1.qnt
 create mode 100644 spec/protocol-spec/twins_n6f1b2.qnt
 create mode 100644 spec/protocol-spec/types.qnt

diff --git a/.github/workflows/spec.yaml b/.github/workflows/spec.yaml
new file mode 100644
index 00000000..91d51ad0
--- /dev/null
+++ b/.github/workflows/spec.yaml
@@ -0,0 +1,27 @@
+name: Specification testing
+
+on:
+  pull_request:
+    paths:
+      - 'spec/**'
+
+jobs:
+
+  test:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout the repository
+        uses: actions/checkout@v4
+
+      - name: Install node
+        uses: actions/setup-node@v4
+        with:
+          node-version: ">= 18"
+          check-latest: true
+
+      - name: Install quint
+        run: npm i @informalsystems/quint -g
+
+      - name: Run test
+        run: cd spec && make test
diff --git a/.gitignore b/.gitignore
index 8aef0499..5867a28c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,6 @@ logs/
 
 # Binaries generated in Docker
 node/tools/docker_binaries
+
+# Apalache files
+**/_apalache-out
diff --git a/spec/Makefile b/spec/Makefile
new file mode 100644
index 00000000..daee551d
--- /dev/null
+++ b/spec/Makefile
@@ -0,0 +1,24 @@
+.PHONY: test unit check repl integration smoke-tests
+
+repl:
+	cd protocol-spec && echo "init\n step\n step\n step" | quint -r main.qnt::main
+
+check:
+	cd protocol-spec && quint typecheck main.qnt
+
+integration:
+	cd protocol-spec/tests && quint test ./tests_n6f1b0.qnt
+	cd protocol-spec/tests && quint test ./tests_n6f1b1.qnt
+	cd protocol-spec/tests && quint test ./tests_n6f1b2.qnt
+
+unit:
+	cd protocol-spec/tests && quint test ./unit_tests.qnt
+
+test: check repl unit integration smoke-tests
+
+smoke-tests:
+	cd protocol-spec/experiments && quint verify --max-steps=1 --invariant=all_invariants n7f1b0.qnt
+	cd protocol-spec/experiments && quint verify --max-steps=1 --invariant=all_invariants n6f1b3.qnt
+	cd protocol-spec/experiments && quint verify --max-steps=1 --invariant=all_invariants n6f1b2.qnt
+	cd protocol-spec/experiments && quint verify --max-steps=1 --invariant=all_invariants n6f1b1.qnt
+	cd protocol-spec/experiments && quint verify --max-steps=1 --invariant=all_invariants n6f1b0.qnt
diff --git a/spec/README.md b/spec/README.md
new file mode 100644
index 00000000..2ef8e82f
--- /dev/null
+++ b/spec/README.md
@@ -0,0 +1,3 @@
+# ChonkyBFT's Specification
+
+This is a formal specification of the ChonkyBFT consensus protocol in Quint.
diff --git a/spec/informal-spec/README.md b/spec/informal-spec/README.md
new file mode 100644
index 00000000..b095cb38
--- /dev/null
+++ b/spec/informal-spec/README.md
@@ -0,0 +1,11 @@
+# ChonkyBFT Specification
+
+This is a ChonkyBFT specification in pseudocode.
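+
+The pseudocode is spread over four files: `fetcher.rs` (asynchronous block
+fetching), `proposer.rs` (the proposer role), `replica.rs` (the replica state
+machine), and `types.rs` (messages, certificates, and weight thresholds).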
+
+There are a couple of considerations that are not described in the pseudo-code:
+
+- **Network model**. Messages might be delivered out of order, but we don’t guarantee eventual delivery for *all* messages. Actually, our network only guarantees eventual delivery of the most recent message for each type. That’s because each replica only stores the last outgoing message of each type in memory, and always tries to deliver those messages whenever it reconnects with another replica.
+- **Garbage collection**. We can’t store all messages; the goal is to bound the number of messages that each replica stores, in order to avoid DoS attacks. We handle messages like this:
+    - `NewView` messages are never stored, so no garbage collection is necessary.
+    - We keep all `Proposal` messages until the proposal (or a proposal with the same block number) is finalized (which means that some honest replica has both the `Proposal` and the corresponding `CommitQC`; we assume that any honest replica in that situation will immediately broadcast the block on the gossip network).
+    - We only store the newest `CommitVote` **and** `TimeoutVote` for each replica. Honest replicas only change views on QCs, so if they send a newer message, they must also have sent a `NewView` on the transition, which means we can just get the QC from that replica. Even if the other replicas don’t receive the QC, it will just trigger a reproposal.
\ No newline at end of file
diff --git a/spec/informal-spec/fetcher.rs b/spec/informal-spec/fetcher.rs
new file mode 100644
index 00000000..076ed3e4
--- /dev/null
+++ b/spec/informal-spec/fetcher.rs
@@ -0,0 +1,15 @@
+// Asynchronous block fetcher.
+// Fetches all the committed blocks that it doesn't have locally from the peers.
+
+fn on_start(replica_state: &ReplicaState) {
+    loop {
+        let next_block_number = replica_state.committed_blocks.len();
+        let Some(commit_qc) = replica_state.high_commit_qc else { continue };
+        if commit_qc.vote.block_number < next_block_number { continue }
+        // This function queries other replicas whether they have a CommittedBlock
+        // with the given number, and fetches it from them. The fetched block and
+        // its commit_qc are verified locally.
+        let block: CommittedBlock = self.fetch_block_from_peers(next_block_number);
+        self.committed_blocks.push(block);
+    }
+}
diff --git a/spec/informal-spec/proposer.rs b/spec/informal-spec/proposer.rs
new file mode 100644
index 00000000..12268201
--- /dev/null
+++ b/spec/informal-spec/proposer.rs
@@ -0,0 +1,44 @@
+// Proposer
+
+// This is called when the proposer starts, at the beginning of the consensus.
+fn on_start(replica_state: &ReplicaState) {
+    // The proposer has visibility into the replica state, but it CANNOT write to it.
+    let mut cur_view = replica_state.view;
+
+    loop {
+        // Check if we are in a new view and we are the leader.
+        // Note that by definition, it's impossible to be the leader for view 0.
+        // We always let view 0 time out.
+        if cur_view < replica_state.view && replica_state.pk == leader(replica_state.view) {
+            cur_view = replica_state.view;
+
+            // Get the justification for this view. If we have both a commit QC
+            // and a timeout QC for this view (highly unlikely), we should prefer
+            // to use the commit QC.
+            let justification = replica_state.get_justification(cur_view);
+
+            // Get the block number and check if this must be a reproposal.
+            let (block_number, opt_block_hash) = justification.get_implied_block();
+
+            // Propose only if you have collected all committed blocks so far.
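+            // (committed_blocks is kept gapless -- committed_blocks[i] has block
+            // number i -- so the next proposable block number is exactly the
+            // length of that list, which is what the assertion below checks.)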
+            assert!(block_number == self.committed_blocks.last().map_or(0,|b|b.commit_qc.vote.block_number+1));
+
+            // Now we create the block.
+            let block = if opt_block_hash.is_some() {
+                // There was some proposal last view that at least SUBQUORUM_WEIGHT replicas
+                // voted for and could have been finalized. We need to repropose it.
+                None
+            } else {
+                // The previous proposal was finalized, so we can propose a new block.
+                // If we don't have the previous block, this call will fail. This is
+                // fine as we can just not propose anything and let our turn end.
+                Some(self.create_proposal(block_number))
+            };
+
+            // Create the proposal message and send it to all replicas.
+            let proposal = Proposal::new(block, justification);
+
+            self.send(proposal);
+        }
+    }
+}
diff --git a/spec/informal-spec/replica.rs b/spec/informal-spec/replica.rs
new file mode 100644
index 00000000..02d6ec14
--- /dev/null
+++ b/spec/informal-spec/replica.rs
@@ -0,0 +1,256 @@
+// Replica
+
+/// A block with a matching valid certificate.
+/// Invariant: hash(block) == commit_qc.vote.block_hash
+struct CommittedBlock {
+    block: Block,
+    commit_qc: CommitQC,
+}
+
+struct ReplicaState {
+    // The view this replica is currently in.
+    view: ViewNumber,
+    // The phase of the replica. Determines if we already voted for a proposal
+    // in this view.
+    phase: Phase,
+    // The commit vote with the highest view that this replica has signed, if any.
+    high_vote: Option<CommitVote>,
+    // The commit quorum certificate with the highest view that this replica
+    // has observed, if any.
+    high_commit_qc: Option<CommitQC>,
+    // The timeout quorum certificate with the highest view that this replica
+    // has observed, if any.
+    high_timeout_qc: Option<TimeoutQC>,
+    // A list of every replica (including us) and their respective weights.
+    committee: Map<ValidatorPubKey, Weight>,
+    // Our public key. It also acts as our identifier.
+    pk: ValidatorPubKey,
+    // A cache of not-yet-committed proposals.
+    // Collects all received proposals sent by the proposers.
+    // In a real implementation this cache would be actively pruned.
+    cached_proposals: Map<(BlockNumber, BlockHash), Block>,
+    // A history of committed blocks.
+    // Invariant: committed_blocks[i].commit_qc.vote.block_number == i
+    committed_blocks: Vec<CommittedBlock>,
+}
+
+enum Phase {
+    Prepare,
+    Commit,
+    Timeout,
+}
+
+// This is called when the replica starts, at the beginning of the consensus.
+// It is a loop that takes incoming messages and calls the corresponding
+// method for each message.
+fn on_start(self) {
+    // Imagine there's a timer util that just has two states (finished or not) and
+    // counts down from some given duration. For example, it counts down from 1s.
+    // If that time elapses, the timer will change state to finished.
+    // If it is reset before that, it starts counting down again from 1s.
+    let timer = Timer::new(duration);
+
+    // Get the current view.
+    let mut cur_view = self.view;
+
+    loop {
+        // If the view has increased before the timeout, we reset the timer.
+        if cur_view < self.view {
+            cur_view = self.view;
+            timer.reset();
+        }
+
+        // If the timer has finished, we send a timeout vote.
+        // If this is the first view, we immediately time out. This will force the replicas
+        // to synchronize right at the beginning and will provide a justification for the
+        // proposer at view 1.
+        // If we have already timed out, we don't need to send another timeout vote.
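+        // (The Phase::Timeout check below is what prevents duplicate timeout
+        // votes, and the `cur_view == 0` disjunct is what forces the immediate
+        // timeout in view 0.)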
+        if (timer.is_finished() || cur_view == 0) && self.phase != Phase::Timeout {
+            let vote = TimeoutVote::new(self.view,
+                                        self.high_vote,
+                                        self.high_commit_qc);
+
+            // Update our state so that we can no longer vote commit in this view.
+            self.phase = Phase::Timeout;
+
+            // Send the vote to all replicas (including ourselves).
+            self.send(vote);
+        }
+
+        // Try to get a message from the message queue and process it. We don't
+        // detail the message queue structure since it's boilerplate.
+        if let Some(message) = message_queue.pop() {
+            match message {
+                Proposal(msg) => {
+                    self.on_proposal(msg);
+                }
+                Commit(msg) => {
+                    self.on_commit(msg);
+                }
+                Timeout(msg) => {
+                    self.on_timeout(msg);
+                }
+                NewView(msg) => {
+                    self.on_new_view(msg);
+                }
+            }
+        }
+    }
+}
+
+fn on_proposal(self, proposal: Proposal) {
+    // We only allow proposals for the current view if we have not voted in
+    // it yet.
+    assert!((proposal.view() == self.view && self.phase == Phase::Prepare) || proposal.view() > self.view);
+
+    // We ignore proposals from the wrong leader.
+    assert!(proposal.leader() == leader(proposal.view()));
+
+    // Check that the proposal is valid.
+    assert!(proposal.verify());
+
+    // Get the implied block number and hash (if any).
+    let (block_number, opt_block_hash) = proposal.justification.get_implied_block();
+
+    // Vote only if you have collected all committed blocks so far.
+    assert!(block_number == self.committed_blocks.last().map_or(0,|b|b.commit_qc.vote.block_number+1));
+
+    // Check if this is a reproposal or not, and do the necessary checks.
+    // As a side result, get the correct block hash.
+    let block_hash = match opt_block_hash {
+        Some(hash) => {
+            // This is a reproposal. We let the leader repropose blocks without sending
+            // them in the proposal (it sends only the number + hash). That allows a
+            // leader to repropose a block without having it stored.
+            // It is an optimization that allows us to not wait for a leader that has
+            // the previous proposal stored (which can take 4f views), and to somewhat
+            // speed up reproposals by skipping block broadcast.
+            // This only saves time because we have a gossip network running in parallel,
+            // and any time a replica is able to create a finalized block (by possessing
+            // both the block and the commit QC) it broadcasts the finalized block (this
+            // was meant to propagate the block to full nodes, but of course validators
+            // will end up receiving it as well).
+            // However, this can be difficult to model and we might want to just
+            // ignore the gossip network in the formal model. We will still have liveness,
+            // but in the model we'll end up waiting 4f views to get a leader that has the
+            // previous block before proposing a new one. This is not that bad, since
+            // then we can be sure that the consensus will continue even if the gossip
+            // network is failing for some reason.
+
+            // For sanity reasons, we'll check that there's no block in the proposal.
+            // But this check is completely unnecessary (in theory at least).
+            assert!(proposal.block.is_none());
+
+            hash
+        }
+        None => {
+            // This is a new proposal, so we need to verify it (i.e., execute it).
+            assert!(proposal.block.is_some());
+            let block = proposal.block.unwrap();
+            // To verify the block, the replica just tries to apply it to the current
+            // state, which is the result of applying all the committed blocks until now.
+            assert!(self.verify_block(block_number, block));
+            // We cache the new proposals, waiting for them to be committed.
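+            // (The cache is keyed by (block_number, block_hash) so that
+            // process_commit_qc can later look the block up by exactly the
+            // pair that a CommitQC commits to.)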
+            self.cached_proposals.insert((block_number, proposal.block.hash()), block);
+            block.hash()
+        }
+    };
+
+    // Update the state.
+    let vote = CommitVote::new(proposal.view(), block_number, block_hash);
+
+    self.view = proposal.view();
+    self.phase = Phase::Commit;
+    self.high_vote = Some(vote);
+    match proposal.justification {
+        Commit(qc) => self.process_commit_qc(Some(qc)),
+        Timeout(qc) => {
+            self.process_commit_qc(qc.high_qc());
+            self.high_timeout_qc = max(Some(qc), self.high_timeout_qc);
+        }
+    };
+
+    // Send the commit vote to all replicas (including ourselves).
+    self.send(vote);
+}
+
+// Processes an (already verified) commit_qc received from the network
+// as part of some message. It bumps the local high_commit_qc and, if
+// we have the proposal corresponding to this qc, appends it to the committed_blocks.
+fn process_commit_qc(self, qc_opt: Option<CommitQC>) {
+    if let Some(qc) = qc_opt {
+        self.high_commit_qc = max(Some(qc), self.high_commit_qc);
+        let Some(block) = self.cached_proposals.get((qc.vote.block_number, qc.vote.block_hash)) else { return };
+        if self.committed_blocks.len() == qc.vote.block_number {
+            self.committed_blocks.push(CommittedBlock { block, commit_qc: qc });
+        }
+    }
+}
+
+fn on_commit(self, sig_vote: SignedCommitVote) {
+    // If the vote isn't current, just ignore it.
+    assert!(sig_vote.view() >= self.view);
+
+    // Check that the signed vote is valid.
+    assert!(sig_vote.verify());
+
+    // Store the vote. We will never store duplicate (same view and sender) votes.
+    // If we already have this vote, we exit early.
+    assert!(self.store(sig_vote).is_ok());
+
+    // Check if we now have a commit QC for this view.
+    if let Some(qc) = self.get_commit_qc(sig_vote.view()) {
+        self.process_commit_qc(Some(qc));
+        self.start_new_view(sig_vote.view() + 1);
+    }
+}
+
+fn on_timeout(self, sig_vote: SignedTimeoutVote) {
+    // If the vote isn't current, just ignore it.
+    assert!(sig_vote.view() >= self.view);
+
+    // Check that the signed vote is valid.
+    assert!(sig_vote.verify());
+
+    // Store the vote. We will never store duplicate (same view and sender) votes.
+    // If we already have this vote, we exit early.
+    assert!(self.store(sig_vote).is_ok());
+
+    // Check if we now have a timeout QC for this view.
+    if let Some(qc) = self.get_timeout_qc(sig_vote.view()) {
+        self.process_commit_qc(qc.high_qc());
+        self.high_timeout_qc = max(Some(qc), self.high_timeout_qc);
+        self.start_new_view(sig_vote.view() + 1);
+    }
+}
+
+fn on_new_view(self, new_view: NewView) {
+    // If the message isn't current, just ignore it.
+    assert!(new_view.view() >= self.view);
+
+    // Check that the new view is valid.
+    assert!(new_view.verify());
+
+    // Update our state.
+    match new_view.justification {
+        Commit(qc) => self.process_commit_qc(Some(qc)),
+        Timeout(qc) => {
+            self.process_commit_qc(qc.high_qc());
+            self.high_timeout_qc = max(Some(qc), self.high_timeout_qc);
+        }
+    };
+
+    if new_view.view() > self.view {
+        self.start_new_view(new_view.view());
+    }
+}
+
+fn start_new_view(self, view: ViewNumber) {
+    self.view = view;
+    self.phase = Phase::Prepare;
+
+    // Send a new view message to the other replicas, for synchronization.
+    let new_view = NewView::new(self.get_justification(view));
+
+    self.send(new_view);
+}
diff --git a/spec/informal-spec/types.rs b/spec/informal-spec/types.rs
new file mode 100644
index 00000000..16d955ea
--- /dev/null
+++ b/spec/informal-spec/types.rs
@@ -0,0 +1,275 @@
+// The sum of all validator weights.
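+// (Worked example, matching the n6f1b* model-checking instances: with six
+// weight-1 validators, TOTAL_WEIGHT = 6, FAULTY_WEIGHT = 1, QUORUM_WEIGHT = 5,
+// and SUBQUORUM_WEIGHT = 3.)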
+const TOTAL_WEIGHT = committee.sum_weights();
+// The maximum weight of faulty replicas.
+// We want 5*FAULTY_WEIGHT + 1 = TOTAL_WEIGHT.
+const FAULTY_WEIGHT = (TOTAL_WEIGHT - 1) / 5;
+// The weight threshold needed to form a quorum certificate.
+const QUORUM_WEIGHT = TOTAL_WEIGHT - FAULTY_WEIGHT;
+// The weight threshold needed to trigger a reproposal.
+const SUBQUORUM_WEIGHT = TOTAL_WEIGHT - 3 * FAULTY_WEIGHT;
+
+// Messages
+
+struct Proposal {
+    // Block that the leader is proposing, if this is a new proposal.
+    block: Option<Block>,
+    // What attests to the validity of this message.
+    justification: Justification,
+    // Signature of the sender.
+    sig: Signature,
+}
+
+impl Proposal {
+    fn view(self) -> ViewNumber {
+        self.justification.view()
+    }
+
+    fn verify(self) -> bool {
+        self.verify_sig() && self.justification.verify()
+    }
+}
+
+enum Justification {
+    // This proposal is being proposed after a view where we finalized a block.
+    // A commit QC is just a collection of commit votes (with at least
+    // QUORUM_WEIGHT) for the previous view. Note that the commit votes MUST
+    // be identical.
+    Commit(CommitQC),
+    // This proposal is being proposed after a view where we timed out.
+    // A timeout QC is just a collection of timeout votes (with at least
+    // QUORUM_WEIGHT) for the previous view. Unlike with the Commit QC,
+    // timeout votes don't need to be identical.
+    Timeout(TimeoutQC),
+}
+
+impl Justification {
+    fn view(self) -> ViewNumber {
+        match self {
+            Commit(qc) => qc.view() + 1,
+            Timeout(qc) => qc.view() + 1,
+        }
+    }
+
+    fn verify(self) -> bool {
+        match self {
+            Commit(qc) => qc.verify(),
+            Timeout(qc) => qc.verify(),
+        }
+    }
+
+    // This returns the block number that is implied by this justification.
+    // If the justification requires a block reproposal, it also returns
+    // the hash of the block that must be reproposed.
+    fn get_implied_block(self) -> (BlockNumber, Option<BlockHash>) {
+        match self {
+            Commit(qc) => {
+                // The previous proposal was finalized, so we can propose a new block.
+                (qc.block_number + 1, None)
+            }
+            Timeout(qc) => {
+                // Get the high vote of the timeout QC, if it exists. We check if there are
+                // timeout votes with at least an added weight of SUBQUORUM_WEIGHT,
+                // that have a high vote field for the same block. A QC can have
+                // 0, 1 or 2 such blocks.
+                // If there's only 1 such block, then we say the QC has a high vote.
+                // If there are 0 or 2 such blocks, we say the QC has no high vote.
+                let high_vote: Option<CommitVote> = qc.high_vote();
+
+                // Get the high commit QC of the timeout QC. We compare the high QC field of
+                // all timeout votes in the QC, and get the highest one, if it exists.
+                let high_qc: Option<CommitQC> = qc.high_qc();
+
+                if high_vote.is_some()
+                    && (high_qc.is_none() || high_vote.block_number > high_qc.block_number)
+                {
+                    // There was some proposal last view that might have been finalized.
+                    // We need to repropose it.
+                    (high_vote.block_number, Some(high_vote.block_hash))
+                } else {
+                    // Either the previous proposal was finalized or we know for certain
+                    // that it couldn't have been finalized. Either way, we can propose
+                    // a new block.
+                    let block_number = match high_qc {
+                        Some(qc) => qc.block_number + 1,
+                        None => 0,
+                    };
+
+                    (block_number, None)
+                }
+            }
+        }
+    }
+}
+
+struct CommitVote {
+    // The current view.
+    view: ViewNumber,
+    // The number of the block that the replica is committing to.
+    block_number: BlockNumber,
+    // The hash of the block that the replica is committing to.
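+    // (A CommitQC aggregates only votes whose (view, block_number, block_hash)
+    // triples are identical.)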
+    block_hash: BlockHash,
+}
+
+struct SignedCommitVote {
+    // The commit.
+    vote: CommitVote,
+    // Signature of the sender.
+    sig: Signature,
+}
+
+impl SignedCommitVote {
+    fn view(self) -> ViewNumber {
+        self.vote.view()
+    }
+
+    fn verify(self) -> bool {
+        self.verify_sig()
+    }
+}
+
+// If we have identical commit votes with at least QUORUM_WEIGHT
+// from different replicas, we can create a commit quorum certificate locally.
+struct CommitQC {
+    // The commit.
+    vote: CommitVote,
+    // The aggregate signature of the replicas. We ignore the details here.
+    // Can be something as simple as a vector of signatures.
+    agg_sig: AggregateSignature,
+}
+
+impl CommitQC {
+    fn view(self) -> ViewNumber {
+        self.vote.view()
+    }
+
+    fn verify(self) -> bool {
+        // Here we need to check not only the signature, but also that
+        // it has enough weight behind it.
+        self.verify_agg_sig(QUORUM_WEIGHT)
+    }
+}
+
+struct TimeoutVote {
+    // The current view.
+    view: ViewNumber,
+    // The commit vote with the highest view that this replica has signed, if any.
+    high_vote: Option<CommitVote>,
+    // The commit quorum certificate with the highest view that this replica
+    // has observed, if any.
+    high_commit_qc: Option<CommitQC>,
+}
+
+struct SignedTimeoutVote {
+    // The timeout.
+    vote: TimeoutVote,
+    // Signature of the sender.
+    sig: Signature,
+}
+
+impl SignedTimeoutVote {
+    fn view(self) -> ViewNumber {
+        self.vote.view()
+    }
+
+    fn verify(self) -> bool {
+        // Technically, only the signature needs to be verified. But if we wish,
+        // there are two invariants that are easy to check:
+        // self.view() >= self.high_vote.view() && self.high_vote.view() >= self.high_commit_qc.view()
+        self.verify_sig()
+    }
+}
+
+// If we have timeout votes with at least QUORUM_WEIGHT for the same
+// view from different replicas, we can create a timeout quorum certificate
+// locally. Contrary to the commit QC, this QC aggregates different messages.
+struct TimeoutQC {
+    // A collection of the replica timeout votes, indexed by the corresponding
+    // validator public key.
+    // There are better data structures for this. This is just for clarity.
+    votes: Map<ValidatorPubKey, TimeoutVote>,
+    // The aggregate signature of the replicas. We ignore the details here.
+    // Can be something as simple as a vector of signatures.
+    agg_sig: AggregateSignature,
+}
+
+impl TimeoutQC {
+    fn view(self) -> ViewNumber {
+        self.votes[0].view()
+    }
+
+    fn verify(self) -> bool {
+        // Check that all votes have the same view.
+        for (_, vote) in votes {
+            if vote.view != self.view() {
+                return false;
+            }
+        }
+
+        // Get the high commit QC of the timeout QC. We compare the high QC field of all
+        // timeout votes in the QC, and get the highest one, if it exists.
+        // We then need to verify that it is valid. We don't need to verify the commit QCs
+        // of the other timeout votes, since they don't have any influence on the result.
+        if let Some(high_qc) = self.high_qc() {
+            if !high_qc.verify() {
+                return false;
+            }
+        }
+
+        // Here we need to check not only the signature, but also that
+        // it has enough weight behind it.
+        self.verify_agg_sig(QUORUM_WEIGHT)
+    }
+
+    fn high_vote(self) -> Option<CommitVote> {
+        let mut map: Map<CommitVote, Weight> = Map::new();
+
+        for (pk, vote) in votes {
+            if map.exists(vote) {
+                map.get_value(vote).add(get_weight(pk));
+            } else {
+                map.insert(vote, get_weight(pk));
+            }
+        }
+
+        let mut subquorums = Vec::new();
+
+        for (vote, weight) in map {
+            if weight >= SUBQUORUM_WEIGHT {
+                subquorums.push(vote);
+            }
+        }
+
+        if subquorums.len() == 1 {
+            Some(subquorums[0])
+        } else {
+            None
+        }
+    }
+
+    fn high_qc(self) -> Option<CommitQC> {
+        let mut high_qc = None;
+
+        for (_, vote) in votes {
+            high_qc = max(high_qc, vote.high_commit_qc);
+        }
+
+        high_qc
+    }
+}
+
+struct NewView {
+    // What attests to the validity of this view change.
+    justification: Justification,
+    // Signature of the sender.
+    sig: Signature,
+}
+
+impl NewView {
+    fn view(self) -> ViewNumber {
+        self.justification.view()
+    }
+
+    fn verify(self) -> bool {
+        self.verify_sig() && self.justification.verify()
+    }
+}
diff --git a/spec/protocol-spec/README.md b/spec/protocol-spec/README.md
new file mode 100644
index 00000000..a051d819
--- /dev/null
+++ b/spec/protocol-spec/README.md
@@ -0,0 +1,170 @@
+# ChonkyBFT
+
+This page summarizes the scope of the Quint specification and the experiments we
+have done with it. This Quint specification was prepared by Igor Konnov and
+Denis Kolegov (Matter Labs) under a contract with Matter Labs.
+
+We present the experiments with randomized simulation and bounded model
+checking. An inductive proof of safety is work in progress.
+
+## 1. Protocol configurations
+
+We are checking the protocol specification for several configurations:
+
+ - [n6f1b0](./n6f1b0.qnt) is the protocol instance for `N=6`, `F=1`, and no
+ faulty replicas. All six replicas are correct, though some of them may be slow.
+ This instance must be safe, e.g., `agreement_inv` must not be violated.
+
+ - [n7f1b0](./n7f1b0.qnt) is the protocol instance for `N=7`, `F=1`, and no
+ faulty replicas. All seven replicas are correct, though some of them may be slow.
+ This instance lets us test whether more correct replicas may introduce issues.
+
+ - [n6f1b1](./n6f1b1.qnt) is the protocol instance for `N=6`, `F=1`, and one
+ Byzantine replica. Five correct replicas should be able to tolerate one
+ Byzantine replica. This instance must be safe, e.g., `agreement_inv` must not
+ be violated.
+
+ - [n6f1b2](./n6f1b2.qnt) is the protocol instance for `N=6`, `F=1`, and two
+ Byzantine replicas. Two Byzantine replicas have the critical mass to partition
+ the correct replicas. Some invariants such as `agreement_inv` are violated in
+ this instance.
+
+ - [n6f1b3](./n6f1b3.qnt) is the protocol instance for `N=6`, `F=1`, and three
+ Byzantine replicas. Many invariants are violated in this instance very quickly.
+
+## 2. Finding examples
+
+It is usually a good idea to find examples of the states that you believe the
+protocol should be able to reach.
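+If such a state is reachable, then the violation of an invariant stating the
+opposite gives us a witness execution for free.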
+We produce such examples by checking "falsy"
+state invariants, which carry the suffix `_example`:
+
+|#| Invariant | Description |
+|--| ---------------- | ------------------------------------------------------------- |
+|1| `phase_commit_example` | Produce an example of a correct replica reaching `PhaseCommit` |
+|2| `high_timeout_qc_example` | Produce an example of a non-empty `TimeoutQC` being stored as a high `TimeoutQC` by a correct replica |
+|3| `timeout_qc_example` | Produce an execution that lets us form a `TimeoutQC` |
+|4| `commit_qc_example` | Produce an execution that lets us form a `CommitQC` |
+|5| `justification_example` | Produce an execution that forms at least one justification in a non-zero view |
+|6| `high_commit_qc_example` | Produce an execution where a correct replica stores a high `CommitQC` |
+|7| `views_over_2_example` | Produce an execution where a correct replica progresses beyond view 2 |
+|8| `blocks_example` | Produce an execution where a correct replica finalizes one block |
+|9| `no_block_gaps_example` | Produce an execution where a correct replica has gaps in its finalized blocks |
+|10| `two_blocks_example` | Produce an execution where two correct replicas commit two different blocks (e.g., in different views) |
+|11| `two_chained_blocks_example` | Produce an execution where a correct replica finalizes two blocks |
+
+There are two ways to find examples, which we consider below.
+
+**Running the randomized simulator.** With this command, the tool simply replaces non-deterministic choices such as `any` and `oneOf` with random choice. For simple invariants, the randomized simulator produces examples very quickly:
+
+   ```
+   $ quint run --invariant=phase_commit_example ./experiments/n6f1b0.qnt
+   ```
+
+**Running randomized symbolic execution.** With this command, the tool randomly chooses
+actions in `any {...}` but keeps the choice of data in `oneOf` symbolic. It further offloads the task of finding an execution to the symbolic model checker called [Apalache][]. This is how we run symbolic
+execution:
+
+   ```
+   $ quint verify --random-transitions=true --invariant=phase_commit_example ./experiments/n6f1b0.qnt
+   ```
+
+## 3. State invariants
+
+In the course of writing the specification, we have developed a number of state
+invariants. By simulating and model checking the specification against these
+invariants, we have increased our confidence in the correctness of the protocol.
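+The most important of these is `agreement_inv`: the classical safety property
+that no two correct replicas ever finalize different blocks for the same block
+number.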
+
+|#| Invariant | Description |
+|--| ---------------- | ------------------------------------------------------------- |
+|1| `agreement_inv` | No two correct replicas finalize two different blocks for the same block number |
+|2| `global_blockchain_inv` | The states of all correct replicas allow us to build a blockchain |
+|3| `committed_blocks_have_justification_inv` | Every block finalized by a correct replica is backed by a justification |
+|4| `no_proposal_equivocation_inv` | No correct proposer sends two different proposals in the same view |
+|5| `no_commit_equivocation_inv` | No correct replica sends two different commit votes in the same view |
+|6| `no_timeout_equivocation_inv` | No correct replica sends two different timeout votes in the same view |
+|7| `no_new_view_equivocation_inv` | No correct replica sends two different `NewView` messages in the same view |
+|8| `store_signed_commit_all_inv` | Integrity of the signed commit votes that are stored by the correct replicas |
+|9| `store_signed_timeout_all_inv` | Integrity of the signed timeout votes that are stored by the correct replicas |
+|10| `view_justification_inv` | Every view change is backed by a justification |
+|11| `justification_is_supported_inv` | Every justification stored by a correct replica is backed by messages that were sent by other replicas |
+|12| `one_high_vote_in_timeout_qc_inv` | A single `TimeoutQC` does not let us form two high votes |
+|13| `one_commit_quorum_inv` | It should not be possible to build two quorums for two different block hashes in the same view |
+|14| `all_replicas_high_commit_qc_inv` | Integrity of high `CommitQC`'s stored by correct replicas |
+|15| `all_replicas_high_timeout_qc_inv` | Integrity of high `TimeoutQC`'s stored by correct replicas |
+|16| `msgs_signed_timeout_inv` | Correct replicas do not send malformed timeout votes |
+|17| `msgs_signed_commit_inv` | Correct replicas do not send malformed commit votes |
+|18| `all_replicas_high_vote_inv` | Correct replicas do not store malformed high votes |
+|19| `timeout_high_vote_is_highest_inv` | The high vote is the highest among the votes sent by a replica |
+
+## 4. Checking state invariants
+
+**Randomized simulator.** Similar to finding examples, we run the randomized simulator
+to check the correctness of the protocol against state invariants. For example:
+
+```sh
+quint run --max-steps=20 --max-samples=100000 --invariant=no_commit_equivocation_inv n6f1b3.qnt
+```
+
+By changing `--max-steps`, we tell the simulator how deep the executions should be, e.g., up to 20 steps. By changing `--max-samples`, we give the simulator the number of executions to try, e.g., 100,000.
+
+**Randomized symbolic execution.** Symbolic execution offloads the task of
+finding an execution to the symbolic model checker, while randomly choosing
+actions at each step:
+
+```sh
+quint verify --max-steps=20 --random-transitions=true --invariant=no_commit_equivocation_inv n6f1b3.qnt
+```
+
+Note that there is no need to specify `--max-samples`, as the symbolic model
+checker does not enumerate concrete executions, in contrast to the randomized
+simulator. Instead, the symbolic model checker enumerates up to 100 *symbolic
+executions*.
+
+**Bounded model checker.** The bounded model checker checks an invariant against
+*all* executions up to a given depth:
+
+```sh
+quint verify --max-steps=20 --invariant=no_commit_equivocation_inv n6f1b3.qnt
+```
+
+The bounded model checker gives us the strongest correctness guarantees.
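+Up to the given bound on the number of steps, its verdict is exhaustive rather
+than statistical.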
+These guarantees
+come at the price of this command being the slowest one.
+
+## 5. Checking invariants in parallel
+
+Randomized simulations and model checking may take considerable time. This
+is not surprising, as our task is no easier than fuzzing. To utilize multi-core
+CPUs and beefy machines, we introduce two parallelization scripts. Both scripts
+require [GNU Parallel][].
+
+**Script quint-verify-parallel.sh.** This script lets us run randomized symbolic
+execution across multiple nodes. Since actions are chosen at random, we explore
+multiple symbolic paths in parallel. Here is how it is run:
+
+```sh
+./experiments/quint-verify-parallel.sh n6f1b3.qnt all_invariants 30 800 7
+```
+
+Type `./experiments/quint-verify-parallel.sh` to read about the meaning of the command-line arguments.
+
+This command finishes as soon as one of the jobs finds an invariant violation.
+You can check the logs in the directory called `./out`.
+
+**Script test-invariants.sh.** This script parallelizes invariant checking across
+multiple CPUs:
+
+```sh
+./experiments/test-invariants.sh n6f1b1.qnt all-invariants.csv 30 12 100
+```
+
+Type `./experiments/test-invariants.sh` to read about the meaning of the
+command-line arguments. The most important argument is the
+comma-separated-values (CSV) file that contains the invariants to check and the
+server ports to use when running the model checker.
+
+This script does not terminate immediately when an invariant has been violated.
+Rather, it keeps running in order to find more violations.
+
+
+[Apalache]: https://github.com/informalsystems/apalache/
+[GNU Parallel]: https://www.gnu.org/software/parallel/
\ No newline at end of file
diff --git a/spec/protocol-spec/defs.qnt b/spec/protocol-spec/defs.qnt
new file mode 100644
index 00000000..3f2894ce
--- /dev/null
+++ b/spec/protocol-spec/defs.qnt
@@ -0,0 +1,9 @@
+// -*- mode: Bluespec; -*-
+
+// pure definitions
+module defs {
+    // the maximum of two integers
+    pure def max(x: int, y: int): int = {
+        if (x > y) x else y
+    }
+}
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/README.md b/spec/protocol-spec/experiments/README.md
new file mode 100644
index 00000000..34aa6daf
--- /dev/null
+++ b/spec/protocol-spec/experiments/README.md
@@ -0,0 +1,35 @@
+# Experimental Results
+
+This page summarizes model checking experiments.
+
+## 1. N = 6, F = 1, B = 1
+
+This is the configuration [n6f1b1][], which comprises five correct replicas and one Byzantine replica.
+
+We have run randomized symbolic executions with Apalache with a budget of
+100 symbolic runs, up to 25 steps in each run. The model checker
+checked the following invariants:
+
+ - `agreement_inv`
+ - `committed_blocks_have_justification_inv`
+ - `no_proposal_equivocation_inv`
+ - `no_commit_equivocation_inv`
+ - `no_timeout_equivocation_inv`
+ - `no_new_view_equivocation_inv`
+ - `store_signed_commit_all_inv`
+ - `store_signed_timeout_all_inv`
+ - `view_justification_inv`
+ - `justification_is_supported_inv`
+ - `one_high_vote_in_timeout_qc_inv`
+ - `one_commit_quorum_inv`
+ - `all_replicas_high_commit_qc_inv`
+ - `all_replicas_high_timeout_qc_inv`
+ - `all_replicas_high_vote_inv`
+ - `msgs_signed_timeout_inv`
+ - `msgs_signed_commit_inv`
+ - `timeout_high_vote_is_highest_inv`
+
+After we fixed a technical issue in `agreement_inv`, no further invariant
+violations were found.
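+
+The runs were launched with the parallelization scripts described in the
+[protocol specification README](../README.md); an equivalent single run looks
+like this (from the `spec/protocol-spec/experiments` directory):
+
+```sh
+quint verify --random-transitions=true --max-steps=25 --invariant=agreement_inv n6f1b1.qnt
+```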
+
+[n6f1b1]: ./n6f1b1.qnt
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/all-invariants.csv b/spec/protocol-spec/experiments/all-invariants.csv
new file mode 100644
index 00000000..2ebef7dd
--- /dev/null
+++ b/spec/protocol-spec/experiments/all-invariants.csv
@@ -0,0 +1,19 @@
+agreement_inv,18100
+view_justification_continuous_inv,18101
+committed_blocks_have_justification_inv,18102
+no_proposal_equivocation_inv,18103
+no_commit_equivocation_inv,18104
+no_timeout_equivocation_inv,18105
+no_new_view_equivocation_inv,18106
+store_signed_commit_all_inv,18107
+store_signed_timeout_all_inv,18108
+view_justification_inv,18109
+justification_is_supported_inv,18110
+one_high_vote_in_timeout_qc_inv,18111
+one_commit_quorum_inv,18112
+all_replicas_high_commit_qc_inv,18113
+all_replicas_high_timeout_qc_inv,18114
+all_replicas_high_vote_inv,18115
+msgs_signed_timeout_inv,18116
+msgs_signed_commit_inv,18117
+timeout_high_vote_is_highest_inv,18118
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/apalache.json b/spec/protocol-spec/experiments/apalache.json
new file mode 100644
index 00000000..47e4299d
--- /dev/null
+++ b/spec/protocol-spec/experiments/apalache.json
@@ -0,0 +1,11 @@
+{
+    "checker": {
+        "tuning": {
+            "search.simulation": true,
+            "search.simulation.maxRun": 100
+        },
+        "--discard-disabled": true,
+        "--no-deadlock": true,
+        "--write-intermediate": true
+    }
+}
diff --git a/spec/protocol-spec/experiments/gen-inputs.sh b/spec/protocol-spec/experiments/gen-inputs.sh
new file mode 100755
index 00000000..36d0458e
--- /dev/null
+++ b/spec/protocol-spec/experiments/gen-inputs.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+#
+# Generate a CSV file to be used as a job set by parallel.
+
+if [ "$#" -lt 3 ]; then
+    echo "Use: $0 number invariant output.csv"
+    exit 1
+fi
+
+invariant=$2
+out=$3
+
+# one line per job: the invariant to check and a distinct model-checker port
+rm -f "$out"
+for i in $(seq 1 "$1"); do
+    echo "${invariant},$((18100 + i))" >>$out
+done
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/n6f1b0.qnt b/spec/protocol-spec/experiments/n6f1b0.qnt
new file mode 100644
index 00000000..1c818d9e
--- /dev/null
+++ b/spec/protocol-spec/experiments/n6f1b0.qnt
@@ -0,0 +1,14 @@
+// A specification instance for n=6, f=1, and no Byzantine faults
+module n6f1b0 {
+  import replica(
+    CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5"),
+    FAULTY = Set(),
+    WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1),
+    REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"),
+    N = 6,
+    F = 1,
+    VIEWS = 0.to(5),
+    VALID_BLOCKS = Set("val_b0", "val_b1"),
+    INVALID_BLOCKS = Set("inv_b3")
+  ).* from "../replica"
+}
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/n6f1b0_guided_no_proposing_leader.qnt b/spec/protocol-spec/experiments/n6f1b0_guided_no_proposing_leader.qnt
new file mode 100644
index 00000000..b26c1374
--- /dev/null
+++ b/spec/protocol-spec/experiments/n6f1b0_guided_no_proposing_leader.qnt
@@ -0,0 +1,41 @@
+// A specification instance for n=6, f=1, and no Byzantine faults
+//
+// Due to import/export limitations of Quint, use the definitions starting with g_:
+//
+// quint verify --max-steps=40 --init=g_init --step=g_step --invariant=g_two_chained_blocks_example n6f1b0_guided_no_proposing_leader.qnt
+module n6f1b0_guided_no_proposing_leader {
+  import types.* from "../types"
+
+  import guided_replica(
+    G_CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5"),
+    G_FAULTY = Set(),
+    G_WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1),
+    G_REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4",
"n5"->"n5"), + G_N = 6, + G_F = 1, + G_VIEWS = 0.to(5), + G_VALID_BLOCKS = Set("val_b0", "val_b1"), + G_INVALID_BLOCKS = Set("inv_b3"), + G_INPUT_TRACE = [ + ProposerStep("n0"), + OnProposalStep("n0"), OnProposalStep("n1"), + OnTimerIsFinishedStep("n0"), OnTimerIsFinishedStep("n1"), OnTimerIsFinishedStep("n2"), OnTimerIsFinishedStep("n3"), OnTimerIsFinishedStep("n4"), OnTimerIsFinishedStep("n5"), + OnTimeoutStep("n0"), OnTimeoutStep("n0"), OnTimeoutStep("n0"), OnTimeoutStep("n0"), OnTimeoutStep("n0"), + OnNewViewStep({ id: "n1", view: 2 }), OnNewViewStep({ id: "n2", view: 2 }), OnNewViewStep({ id: "n3", view: 2 }), + OnNewViewStep({ id: "n4", view: 2 }), OnNewViewStep({ id: "n5", view: 2 }), + + ProposerStep("n1"), + OnProposalStep("n2"), OnProposalStep("n1"), + OnTimerIsFinishedStep("n0"), OnTimerIsFinishedStep("n1"), OnTimerIsFinishedStep("n2"), OnTimerIsFinishedStep("n3"), OnTimerIsFinishedStep("n4"), + OnTimeoutStep("n0"), OnTimeoutStep("n0"), OnTimeoutStep("n0"), OnTimeoutStep("n0"), OnTimeoutStep("n0"), + + OnNewViewStep({ id: "n1", view: 3 }), + OnNewViewStep({ id: "n2", view: 3 }), + OnNewViewStep({ id: "n3", view: 3 }), + OnNewViewStep({ id: "n4", view: 3 }), + OnNewViewStep({ id: "n5", view: 3 }), + + + ] + ).* from "../guided_replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n6f1b1.qnt b/spec/protocol-spec/experiments/n6f1b1.qnt new file mode 100644 index 00000000..c675ef99 --- /dev/null +++ b/spec/protocol-spec/experiments/n6f1b1.qnt @@ -0,0 +1,14 @@ +// A specification instance for n=6, f=1, and 1 Byzantine fault +module n6f1b1 { + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4"), + FAULTY = Set("n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n6f1b1_guided_dead_lock.qnt b/spec/protocol-spec/experiments/n6f1b1_guided_dead_lock.qnt new file mode 100644 index 00000000..8d8f399a --- /dev/null +++ b/spec/protocol-spec/experiments/n6f1b1_guided_dead_lock.qnt @@ -0,0 +1,45 @@ +// A specification instance for n=6, f=1, and 1 Byzantine fault +// +/// Test a liveness issue where some validators have the HighQC but don't have the block payload and have to wait for it, +/// while some other validators have the payload but don't have the HighQC and cannot finalize the block, and therefore +/// don't gossip it, which causes a deadlock unless the one with the HighQC moves on and broadcasts what they have, which +/// should cause the others to finalize the block and gossip the payload to them in turn. +/// +/// This test does not execute all steps, which shows that this concrete deadlock scenario is not feasible. 
+
+//
+// Due to import/export limitations of Quint, use the definitions starting with g_:
+//
+// quint verify --max-steps=50 --init=g_init --step=g_step --invariant=g_all_invariants n6f1b1_guided_dead_lock.qnt
+module n6f1b1_guided_dead_lock {
+  import types.* from "../types"
+
+  import guided_replica(
+    G_CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5"),
+    // no random byzantine behaviour
+    G_FAULTY = Set(),
+    G_WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1),
+    G_REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"),
+    G_N = 6,
+    G_F = 1,
+    G_VIEWS = 0.to(5),
+    G_VALID_BLOCKS = Set("val_b0", "val_b1"),
+    G_INVALID_BLOCKS = Set("inv_b3"),
+    G_INPUT_TRACE = [
+      // n0 proposes a block number 0
+      ProposerStep("n0"),
+      OnProposalStep("n0"), OnProposalStep("n1"), OnProposalStep("n2"), OnProposalStep("n3"), OnProposalStep("n4"), OnProposalStep("n5"),
+      // node 0 is slow, node 1 finalizes the block number 0
+      OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"),
+      // n1 is in view 2
+
+      // all replicas but n0 time out
+      OnTimerIsFinishedStep("n2"), OnTimerIsFinishedStep("n3"), OnTimerIsFinishedStep("n4"), OnTimerIsFinishedStep("n5"),
+      // n2-n5 catch up on CommitQC. They should add the block number 0 to their finalized blocks.
+      OnNewViewStep({ id: "n2", view: 2 }), OnNewViewStep({ id: "n3", view: 2 }),
+      OnNewViewStep({ id: "n4", view: 2 }), OnNewViewStep({ id: "n5", view: 2 }),
+      // n1 proposes a block number 1
+      ProposerStep("n1"),
+      OnProposalStep("n1"), OnProposalStep("n2"), OnProposalStep("n3"), OnProposalStep("n4"), OnProposalStep("n5"),
+    ]
+  ).* from "../guided_replica"
+}
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/n6f1b1_guided_two_blocks.qnt b/spec/protocol-spec/experiments/n6f1b1_guided_two_blocks.qnt
new file mode 100644
index 00000000..8288ec21
--- /dev/null
+++ b/spec/protocol-spec/experiments/n6f1b1_guided_two_blocks.qnt
@@ -0,0 +1,32 @@
+// A specification instance for n=6, f=1, and 1 Byzantine fault
+//
+// Due to import/export limitations of Quint, use the definitions starting with g_:
+//
+// quint verify --max-steps=50 --init=g_init --step=g_step --invariant=g_two_chained_blocks_example n6f1b1_guided_two_blocks.qnt
+module n6f1b1_two_blocks {
+  import types.* from "../types"
+
+  import guided_replica(
+    G_CORRECT = Set("n0", "n1", "n2", "n3", "n4"),
+    G_FAULTY = Set("n5"),
+    G_WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1),
+    G_REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"),
+    G_N = 6,
+    G_F = 1,
+    G_VIEWS = 0.to(5),
+    G_VALID_BLOCKS = Set("val_b0", "val_b1"),
+    G_INVALID_BLOCKS = Set("inv_b3"),
+    G_INPUT_TRACE = [
+      ProposerStep("n0"),
+      OnProposalStep("n0"), OnProposalStep("n1"), OnProposalStep("n2"), OnProposalStep("n3"), OnProposalStep("n4"),
+      OnCommitStep("n0"),
OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), + ] + ).* from "../guided_replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n6f1b2.qnt b/spec/protocol-spec/experiments/n6f1b2.qnt new file mode 100644 index 00000000..0eee87c9 --- /dev/null +++ b/spec/protocol-spec/experiments/n6f1b2.qnt @@ -0,0 +1,14 @@ +// A specification instance for n=6, f=1, and 2 Byzantine faults +module n6f1b2 { + import replica( + CORRECT = Set("n0", "n1", "n2", "n3"), + FAULTY = Set("n4", "n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(3), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n6f1b2_boxed.qnt b/spec/protocol-spec/experiments/n6f1b2_boxed.qnt new file mode 100644 index 00000000..600b63ff --- /dev/null +++ b/spec/protocol-spec/experiments/n6f1b2_boxed.qnt @@ -0,0 +1,140 @@ +// A specification instance for n=6, f=1, and 2 Byzantine faults. +// We box the number of different steps. +module n6f1b2_boxed { + import replica( + CORRECT = Set("n0", "n1", "n2", "n3"), + FAULTY = Set("n4", "n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ) as i from "../replica" + + // bounds on the number of steps + pure val MAX_FAULTY_STEPS = 4 + pure val MAX_TIMEOUTS = 5 + pure val MAX_PROPOSER_STEPS = 2 + pure val MAX_ON_PROPOSAL_STEPS = 10 + pure val MAX_ON_COMMIT_STEPS = 10 + pure val MAX_ON_TIMEOUT_STEPS = 5 + pure val MAX_ON_NEW_VIEW_STEPS = 5 + + var nfaulty_steps: int + var ntimeouts: int + var nproposer_steps: int + var ncommit_steps: int + var ntimeout_steps: int + var nproposal_steps: int + var nnew_view_steps: int + + action init: bool = all { + i::init, + nfaulty_steps' = 0, + ntimeouts' = 0, + nproposer_steps' = 0, + ncommit_steps' = 0, + ntimeout_steps' = 0, + nproposal_steps' = 0, + nnew_view_steps' = 0, + } + + action step = { + any { + nondet id = oneOf(i::CORRECT) + any { + all { + i::replica_view' = i::replica_view.set(id, i::replica_state.get(id).view), + any { + all { + ntimeouts < MAX_TIMEOUTS, + i::on_timer_is_finished(id), + ntimeouts' = ntimeouts + 1, + ncommit_steps' = ncommit_steps, + ntimeout_steps' = ntimeout_steps, + nproposal_steps' = nproposal_steps, + nnew_view_steps' = nnew_view_steps, + }, + all { + ntimeout_steps < MAX_ON_TIMEOUT_STEPS, + i::msgs_signed_timeout != Set(), + nondet vote = oneOf(i::msgs_signed_timeout) + i::on_timeout(id, vote), + ntimeouts' = ntimeouts, + ncommit_steps' = ncommit_steps, + ntimeout_steps' = ntimeout_steps + 1, + nproposal_steps' = nproposal_steps, + nnew_view_steps' = nnew_view_steps, + }, + all { + ncommit_steps < MAX_ON_COMMIT_STEPS, + i::msgs_signed_commit != Set(), + nondet signed_vote = oneOf(i::msgs_signed_commit) + i::on_commit(id, signed_vote), + ntimeouts' = ntimeouts, + ncommit_steps' = ncommit_steps + 1, + ntimeout_steps' = ntimeout_steps, + nproposal_steps' = nproposal_steps, + nnew_view_steps' = nnew_view_steps, + }, + all { + nproposal_steps < MAX_ON_PROPOSAL_STEPS, + i::msgs_proposal != Set(), + nondet proposal = oneOf(i::msgs_proposal) + i::on_proposal(id, proposal), + 
+                    ntimeouts' = ntimeouts,
+                    ncommit_steps' = ncommit_steps,
+                    ntimeout_steps' = ntimeout_steps,
+                    nproposal_steps' = nproposal_steps + 1,
+                    nnew_view_steps' = nnew_view_steps,
+                },
+                all {
+                    nnew_view_steps < MAX_ON_NEW_VIEW_STEPS,
+                    i::msgs_new_view != Set(),
+                    nondet new_view = oneOf(i::msgs_new_view)
+                    i::on_new_view(id, new_view),
+                    ntimeouts' = ntimeouts,
+                    ncommit_steps' = ncommit_steps,
+                    ntimeout_steps' = ntimeout_steps,
+                    nproposal_steps' = nproposal_steps,
+                    nnew_view_steps' = nnew_view_steps + 1,
+                }
+            },
+            i::leader' = i::leader,
+            nfaulty_steps' = nfaulty_steps,
+            nproposer_steps' = nproposer_steps,
+        },
+        all {
+            nproposer_steps < MAX_PROPOSER_STEPS,
+            // Non-deterministically choose the next block, use it only for the case of None below.
+            nondet new_block = oneOf(i::VALID_BLOCKS)
+            i::proposer_step(id, new_block),
+            i::replica_view' = i::replica_view,
+            i::leader' = i::leader,
+            nfaulty_steps' = nfaulty_steps,
+            ntimeouts' = ntimeouts,
+            nproposer_steps' = nproposer_steps + 1,
+            ncommit_steps' = ncommit_steps,
+            ntimeout_steps' = ntimeout_steps,
+            nproposal_steps' = nproposal_steps,
+            nnew_view_steps' = nnew_view_steps,
+        },
+      },
+      all {
+        nfaulty_steps < MAX_FAULTY_STEPS,
+        i::faulty_step,
+        i::unchanged_replica,
+        i::leader' = i::leader,
+        nfaulty_steps' = nfaulty_steps + 1,
+        ntimeouts' = ntimeouts,
+        nproposer_steps' = nproposer_steps,
+        ncommit_steps' = ncommit_steps,
+        ntimeout_steps' = ntimeout_steps,
+        nproposal_steps' = nproposal_steps,
+        nnew_view_steps' = nnew_view_steps,
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/n6f1b2_guided_agreement.qnt b/spec/protocol-spec/experiments/n6f1b2_guided_agreement.qnt
new file mode 100644
index 00000000..a3d5b798
--- /dev/null
+++ b/spec/protocol-spec/experiments/n6f1b2_guided_agreement.qnt
@@ -0,0 +1,35 @@
+// A specification instance for n=6, f=1, and 2 Byzantine faults
+//
+// Due to import/export limitations of Quint, use the definitions starting with g_:
+//
+// quint verify --max-steps=50 --init=g_init --step=g_step --invariant=g_agreement_inv n6f1b2_guided_agreement.qnt
+module n6f1b2_agreement {
+  import types.* from "../types"
+
+  import guided_replica(
+    G_CORRECT = Set("n0", "n1", "n2", "n3"),
+    G_FAULTY = Set("n4", "n5"),
+    G_WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1),
+    G_REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"),
+    G_N = 6,
+    G_F = 1,
+    G_VIEWS = 0.to(5),
+    G_VALID_BLOCKS = Set("val_b0", "val_b1"),
+    G_INVALID_BLOCKS = Set("inv_b3"),
+    G_INPUT_TRACE = [
+      FaultyStep,
+      OnProposalStep("n0"), OnProposalStep("n1"), OnProposalStep("n2"),
+      FaultyStep,
+      OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"),
+      FaultyStep,
+      OnProposalStep("n3"),
+      OnTimerIsFinishedStep("n1"), OnTimerIsFinishedStep("n2"), OnTimerIsFinishedStep("n3"),
+      FaultyStep, FaultyStep, FaultyStep,
+      OnNewViewStep({id:"n1", view:2}), OnNewViewStep({id:"n2", view:2}), OnNewViewStep({id:"n3", view:2}),
+      FaultyStep,
+      OnProposalStep("n1"), OnProposalStep("n2"), OnProposalStep("n3"),
+      FaultyStep,
+      OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"),
+    ]
+  ).* from "../guided_replica"
+}
\ No newline at end of file
diff --git a/spec/protocol-spec/experiments/n6f1b3.qnt b/spec/protocol-spec/experiments/n6f1b3.qnt
new file mode 100644
index 00000000..683974bc
--- /dev/null
+++ b/spec/protocol-spec/experiments/n6f1b3.qnt
@@ -0,0 +1,14 @@
FaultyStep, + OnProposalStep("n0"), OnProposalStep("n1"), OnProposalStep("n2"), + FaultyStep, + OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), + FaultyStep, + OnProposalStep("n3"), + OnTimerIsFinishedStep("n1"), OnTimerIsFinishedStep("n2"), OnTimerIsFinishedStep("n3"), + FaultyStep, FaultyStep, FaultyStep, + OnNewViewStep({id:"n1", view:2}), OnNewViewStep({id:"n2", view:2}), OnNewViewStep({id:"n3", view:2}), + FaultyStep, + OnProposalStep("n1"), OnProposalStep("n2"), OnProposalStep("n3"), + FaultyStep, + OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), + ] + ).* from "../guided_replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n6f1b3.qnt b/spec/protocol-spec/experiments/n6f1b3.qnt new file mode 100644 index 00000000..683974bc --- /dev/null +++ b/spec/protocol-spec/experiments/n6f1b3.qnt @@ -0,0 +1,14 @@ +// A specification instance for n=6, f=1, and 3 Byzantine faults +module n6f1b3 { + import replica( + CORRECT = Set("n0", "n1", "n2"), + FAULTY = Set("n3", "n4", "n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n7f1b0.qnt b/spec/protocol-spec/experiments/n7f1b0.qnt new file mode 100644 index 00000000..b1beb6d7 --- /dev/null +++ b/spec/protocol-spec/experiments/n7f1b0.qnt @@ -0,0 +1,14 @@ +// A specification instance for n=7, f=1, and no Byzantine faults +module n7f1b0 { + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5", "n6"), + FAULTY = Set(), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1, "n6"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5", "n6"->"n6"), + N = 7, + F = 1, + VIEWS = 0.to(3), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/n7f1b0_quided_three_blocks.qnt b/spec/protocol-spec/experiments/n7f1b0_quided_three_blocks.qnt new file mode 100644 index 00000000..b0ae55e6 --- /dev/null +++ b/spec/protocol-spec/experiments/n7f1b0_quided_three_blocks.qnt @@ -0,0 +1,66 @@ +// A specification instance for n=7, f=1, and no Byzantine faults. +// +// Due to import/export limitations of Quint, use the definitions starting with g_: +// +// JVM_ARGS=-Xmx16G quint verify --max-steps=160 --init=g_init --step=g_step --invariant=g_all_invariants n7f1b0_quided_three_blocks.qnt +module n7f1b0_three_blocks { + import types.* from "../types" + + import guided_replica( + G_CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5", "n6"), + // no random Byzantine behaviour + G_FAULTY = Set(), + G_WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1, "n6"->1), + G_REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5", "n6"->"n6"), + G_N = 7, + G_F = 1, + G_VIEWS = 0.to(5), + G_VALID_BLOCKS = Set("val_b0", "val_b1"), + G_INVALID_BLOCKS = Set("inv_b3"), + G_INPUT_TRACE = [
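+ // The trace below finalizes three blocks in a row: after each ProposerStep, six proposal + // deliveries let the replicas vote, and each batch of six OnCommitStep deliveries lets one + // replica assemble a commit quorum (six votes out of seven); the last round delivers commits only to n0.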
+ ProposerStep("n1"), + + OnProposalStep("n0"), + OnProposalStep("n1"), + OnProposalStep("n2"), + OnProposalStep("n3"), + OnProposalStep("n4"), + OnProposalStep("n5"), + + OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), + OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), + OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), + OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), + OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), + OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), + + ProposerStep("n4"), + + OnProposalStep("n0"), + OnProposalStep("n1"), + OnProposalStep("n2"), + OnProposalStep("n3"), + OnProposalStep("n4"), + OnProposalStep("n5"), + + OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), + OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), OnCommitStep("n1"), + OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), OnCommitStep("n2"), + OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), OnCommitStep("n3"), + OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), OnCommitStep("n4"), + OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), OnCommitStep("n5"), + + ProposerStep("n5"), + + OnProposalStep("n0"), + OnProposalStep("n1"), + OnProposalStep("n2"), + OnProposalStep("n3"), + OnProposalStep("n4"), + OnProposalStep("n5"), + + OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), OnCommitStep("n0"), + ] + ).* from "../guided_replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/quint-verify-parallel.sh b/spec/protocol-spec/experiments/quint-verify-parallel.sh new file mode 100755 index 00000000..8907eb50 --- /dev/null +++ b/spec/protocol-spec/experiments/quint-verify-parallel.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# +# Check a state invariant in parallel with randomized symbolic execution. +# Igor Konnov, 2024 + +if [ "$#" -lt 5 ]; then + echo "Use: $0 spec.qnt invariant max-steps max-runs max-parallel-jobs [init] [step]" + echo "" + echo " - spec.qnt is the specification to check" + echo " - invariant is the invariant to check" + echo " - max-steps is the maximal number of steps every run may have" + echo " - max-runs is the maximal number of symbolic runs in total to try across all jobs" + echo " - max-parallel-jobs is the maximal number of jobs to run in parallel" + echo " - init is the initialization action" + echo " - step is the step action" + exit 1 +fi + +# https://stackoverflow.com/questions/242538/unix-shell-script-find-out-which-directory-the-script-file-resides +SCRIPT=$(readlink -f "$0") +# Absolute path this script is in, thus /home/user/bin +BASEDIR=$(dirname "$SCRIPT") + +spec=$1 +invariant=$2 +max_steps=$3 +max_runs=$4 +max_jobs=$5 +init=${6:-"init"} +step=${7:-"step"} + +csv=${invariant}_${max_steps}_${max_runs}.csv + +# since `quint verify --random-transitions=true` tries 100 symbolic runs, divide `max_runs` by 100 and compensate for rounding +${BASEDIR}/gen-inputs.sh $((max_runs / 100 + 1)) ${invariant} ${csv} +${BASEDIR}/test-invariants.sh ${spec} ${csv} ${max_steps} ${max_jobs} 1 ${init} ${step} \ No newline at end of file diff --git a/spec/protocol-spec/experiments/test-invariants.sh b/spec/protocol-spec/experiments/test-invariants.sh new file mode 100755 index 00000000..dbe2e662 --- /dev/null +++ b/spec/protocol-spec/experiments/test-invariants.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +if [ "$#" -lt 5 ]; then + echo "Use: $0 spec.qnt input.csv max-steps max-jobs max-failing-jobs [init] [step]" + echo " - spec.qnt is the specification to check" + echo " - input.csv is the experiments file that contains one pair per line: invariant,port" + echo " - max-steps is the maximal number of protocol steps to check, e.g., 30" + echo " - max-jobs is the maximal number of jobs to run in parallel, e.g., 16" + echo " - max-failing-jobs is the maximal number of jobs to fail" + echo " - init is the initial action, by default: init" + echo " - step is the step action, by default: step" + exit 1 +fi + +spec=$1 +input=$2 +max_steps=$3 +max_jobs=$4 +max_failing_jobs=$5
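+ +# The last two arguments default to init/step. A hypothetical invocation that checks the +# invariants listed in all-invariants.csv for 30 steps, with 16 parallel jobs, stopping +# after the first failing job: +# ./test-invariants.sh ../main.qnt all-invariants.csv 30 16 1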
+init=${6:-"init"} +step=${7:-"step"} + +# https://lists.defectivebydesign.org/archive/html/bug-parallel/2017-04/msg00000.html +export LANG= LC_ALL= LC_CTYPE= + +# run at most ${max_jobs} jobs in parallel +parallel -j ${max_jobs} -v --delay 1 --halt now,fail=${max_failing_jobs} --results out --colsep=, -a ${input} \ + quint verify --max-steps=${max_steps} --init=${init} --step=${step} --random-transitions=true \ + --apalache-config=apalache.json \ + --server-endpoint=localhost:{2} --invariant={1} --verbosity=3 ${spec} \ No newline at end of file diff --git a/spec/protocol-spec/guided_replica.qnt b/spec/protocol-spec/guided_replica.qnt new file mode 100644 index 00000000..0353e59c --- /dev/null +++ b/spec/protocol-spec/guided_replica.qnt @@ -0,0 +1,142 @@ +// A restriction of replica.qnt to guide the model checker's search +module guided_replica { + import types.* from "./types" + import option.* from "./option" + + // the input trace to restrict the model checker's search space + const G_INPUT_TRACE: List[StepKind] + + // the same parameters as in replica + const G_WEIGHTS: ReplicaId -> Weight + const G_REPLICA_KEYS: ReplicaId -> ReplicaKey + const G_N: int + const G_F: int + const G_CORRECT: Set[ReplicaId] + const G_FAULTY: Set[ReplicaId] + const G_VIEWS: Set[ViewNumber] + const G_VALID_BLOCKS: Set[Block] + const G_INVALID_BLOCKS: Set[Block] + + import replica( + WEIGHTS = G_WEIGHTS, + REPLICA_KEYS = G_REPLICA_KEYS, + N = G_N, + F = G_F, + CORRECT = G_CORRECT, + FAULTY = G_FAULTY, + VIEWS = G_VIEWS, + VALID_BLOCKS = G_VALID_BLOCKS, + INVALID_BLOCKS = G_INVALID_BLOCKS + ).* from "./replica" + + // invariants + val g_all_invariants = all_invariants + val g_agreement_inv = agreement_inv + val g_committed_blocks_have_justification_inv = committed_blocks_have_justification_inv + val g_no_proposal_equivocation_inv = no_proposal_equivocation_inv + val g_no_commit_equivocation_inv = no_commit_equivocation_inv + val g_no_timeout_equivocation_inv = no_timeout_equivocation_inv + val g_no_new_view_equivocation_inv = no_new_view_equivocation_inv + val g_store_signed_commit_all_inv = store_signed_commit_all_inv + val g_store_signed_timeout_all_inv = store_signed_timeout_all_inv + val g_view_justification_inv = view_justification_inv + val g_justification_is_supported_inv = justification_is_supported_inv + val g_one_high_vote_in_timeout_qc_inv = one_high_vote_in_timeout_qc_inv + val g_one_commit_quorum_inv = one_commit_quorum_inv + val g_all_replicas_high_commit_qc_inv = all_replicas_high_commit_qc_inv + val g_all_replicas_high_timeout_qc_inv = all_replicas_high_timeout_qc_inv + val g_msgs_signed_timeout_inv = msgs_signed_timeout_inv + val g_msgs_signed_commit_inv = msgs_signed_commit_inv + val g_all_replicas_high_vote_inv = all_replicas_high_vote_inv + val g_timeout_high_vote_is_highest_inv = timeout_high_vote_is_highest_inv + + // examples + val g_two_chained_blocks_example = two_chained_blocks_example + val g_one_block_example = one_block_example + + // remaining steps to replay + var to_replay: List[StepKind] + + // initialize the replicas together with the trace to replay + action g_init: bool = all { + init, + to_replay' = G_INPUT_TRACE, + }
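+ + // For instance (a hypothetical trace), G_INPUT_TRACE = [ProposerStep("n0"), OnProposalStep("n1"), AnyStep] + // forces the first two transitions and then leaves the search unconstrained: a leading AnyStep, + // like an exhausted trace, falls through to the unrestricted step below.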
+ // execute the same actions as in replica::step, but restrict their choice with to_replay + action g_step = { + any { + // nothing to replay + all { + length(to_replay) == 0 or to_replay[0] == AnyStep, + to_replay' = if (length(to_replay) == 0) to_replay else tail(to_replay), + step, + }, + // steps to replay + all { + length(to_replay) > 0, + to_replay' = tail(to_replay), + val expected_step = head(to_replay) + any { + all { + expected_step == FaultyStep, + faulty_step, + unchanged_replica, + leader' = leader, + }, + nondet id = oneOf(CORRECT) + any { + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + on_timer_is_finished(id), + expected_step == OnTimerIsFinishedStep(id), + leader' = leader, + }, + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + msgs_signed_commit != Set(), + nondet signed_vote = oneOf(msgs_signed_commit) + on_commit(id, signed_vote), + expected_step == OnCommitStep(id), + leader' = leader, + }, + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + msgs_signed_timeout != Set(), + nondet vote = oneOf(msgs_signed_timeout) + on_timeout(id, vote), + expected_step == OnTimeoutStep(id), + leader' = leader, + }, + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + msgs_proposal != Set(), + nondet proposal = oneOf(msgs_proposal) + on_proposal(id, proposal), + expected_step == OnProposalStep(id), + leader' = leader, + }, + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + msgs_new_view != Set(), + nondet new_view = oneOf(msgs_new_view) + all { + on_new_view(id, new_view), + expected_step == OnNewViewStep({ id: id, view: new_view.justification.view() }), + }, + leader' = leader, + }, + all { + // Non-deterministically choose the next block, use it only for the case of None below. + nondet new_block = oneOf(VALID_BLOCKS) + proposer_step(id, new_block), + replica_view' = replica_view, + leader' = leader, + expected_step == ProposerStep(id), + }, + }, + } + } + } +} \ No newline at end of file diff --git a/spec/protocol-spec/main.qnt b/spec/protocol-spec/main.qnt new file mode 100644 index 00000000..e5f6cfa5 --- /dev/null +++ b/spec/protocol-spec/main.qnt @@ -0,0 +1,15 @@ +/// A few handy small models that are good for debugging and inspection +module main { + // A specification instance for n=6, f=1, and no Byzantine faults + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5"), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + FAULTY = Set(), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "./replica" +} \ No newline at end of file diff --git a/spec/protocol-spec/option.qnt b/spec/protocol-spec/option.qnt new file mode 100644 index 00000000..3e061ca4 --- /dev/null +++ b/spec/protocol-spec/option.qnt @@ -0,0 +1,46 @@ +// -*- mode: Bluespec; -*- + +// a typical option type, defined as a sum type in Quint +module option { + /// A polymorphic option type + type Option[a] = + | Some(a) + | None + + /// is `opt` of the shape `Some(...)` + pure def is_some(opt: Option[a]): bool = { + match (opt) { + | Some(_) => true + | None => false + } + } + + // is `opt` of the shape `None` + pure def is_none(opt: Option[a]): bool = { + not(is_some(opt)) + } + + // get `a` when `opt` is `Some(a)`; otherwise, get `default` + pure def unwrap_or(opt: Option[a], default: a): a = { + match (opt) { + | Some(value) => value + | None => default + } + } + + // if `opt` is Some(e), test whether `pred(e)` holds true; a None passes vacuously + def option_has(opt: Option[a], pred: a => bool): bool = { + match (opt) { + | None => true + | Some(e) => pred(e) + } + }
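+ + // For example: Some(3).unwrap_or(0) evaluates to 3, None.unwrap_or(0) to 0, + // and None.option_has(x => x > 0) holds vacuously.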
+ // return Some(mapper(e)) if opt is Some(e); otherwise None + def option_map(opt: Option[a], mapper: a => b): Option[b] = { + match (opt) { + | None => None + | Some(e) => Some(mapper(e)) + } + } +} \ No newline at end of file diff --git a/spec/protocol-spec/replica.qnt b/spec/protocol-spec/replica.qnt new file mode 100644 index 00000000..c7bf1002 --- /dev/null +++ b/spec/protocol-spec/replica.qnt @@ -0,0 +1,1647 @@ +// -*- mode: Bluespec; -*- + +module replica { + import types.* from "./types" + import defs.* from "./defs" + import option.* from "./option" + + // Weights of all replicas. + const WEIGHTS: ReplicaId -> Weight + // The keys of the replicas, normally, one unique key per replica. + // To model Byzantine behaviour via twin replicas, assign the same key to several replicas. + const REPLICA_KEYS: ReplicaId -> ReplicaKey + // The number of all replicas. + const N: int + // The upper bound on the number of Byzantine replicas. + const F: int + // Identities of the correct replicas. + const CORRECT: Set[ReplicaId] + // Identities of the faulty replicas. + const FAULTY: Set[ReplicaId] + // All replicas. + pure val REPLICAS = CORRECT.union(FAULTY) + // Check that threshold assumptions hold. + assume FIVE_F = (N >= 5 * F + 1 and F >= 0) + assume REPLICA_SIZE = (N == REPLICAS.size() and F == FAULTY.size()) + + // The sum of all validator weights. + pure val TOTAL_WEIGHT: Weight = WEIGHTS.keys().fold(0, (s, id) => s + WEIGHTS.get(id)); + // The maximum weight of faulty replicas. + // We want 5*FAULTY_WEIGHT + 1 = TOTAL_WEIGHT + pure val FAULTY_WEIGHT: Weight = (TOTAL_WEIGHT - 1) / 5; + // The weight threshold needed to form a quorum certificate. + pure val QUORUM_WEIGHT: Weight = TOTAL_WEIGHT - FAULTY_WEIGHT; + // The weight threshold needed to trigger a reproposal. + pure val SUBQUORUM_WEIGHT: Weight = TOTAL_WEIGHT - 3 * FAULTY_WEIGHT;
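+ + // For instance, with six replicas of weight 1: TOTAL_WEIGHT == 6, FAULTY_WEIGHT == (6 - 1) / 5 == 1, + // QUORUM_WEIGHT == 6 - 1 == 5, and SUBQUORUM_WEIGHT == 6 - 3 == 3, + // i.e., the familiar 4f+1 and 2f+1 thresholds for f == 1.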
+ + // Produce a replica's signature; for specification purposes, it is just the replica's key. + pure def sig_of_id(id: ReplicaId): ReplicaKey = REPLICA_KEYS.get(id) + + // The set of all views. + const VIEWS: Set[ViewNumber] + + // the set of all blocks + const VALID_BLOCKS: Set[Block] + const INVALID_BLOCKS: Set[Block] + pure val ALL_BLOCKS = VALID_BLOCKS.union(INVALID_BLOCKS) + assume DISJOINT_BLOCKS = (VALID_BLOCKS.intersect(INVALID_BLOCKS) == Set()) + + var replica_state: ReplicaId -> ReplicaState + // the leader function from views to replica identities + var leader: ViewNumber -> ReplicaKey + // an internal variable to handle timeouts + var replica_view: ReplicaId -> ViewNumber + // the current view of the proposer (only reading the replica state) + var proposer_view: ReplicaId -> ViewNumber + + // the set of all SignedTimeoutVote messages sent in the past + var msgs_signed_timeout: Set[SignedTimeoutVote] + // the set of all SignedCommitVote messages sent in the past + var msgs_signed_commit: Set[SignedCommitVote] + // the set of all NewView messages sent in the past + var msgs_new_view: Set[NewView] + // the set of all Proposal messages sent in the past + var msgs_proposal: Set[Proposal] + + // vote storage for each correct replica + var store_signed_timeout: ReplicaId -> Set[SignedTimeoutVote] + var store_signed_commit: ReplicaId -> Set[SignedCommitVote] + + // justifications stored by a replica + var ghost_justifications: (ReplicaId, ViewNumber) -> Justification + // save the last executed action + var ghost_step: StepKind + + val ALL_FAULTY_SIGNED_COMMIT_VOTES = + tuples(VIEWS, FAULTY, ALL_BLOCKS, VIEWS) + .map( ((v, r, b, n)) => + { + vote:{ + view: v, block_number: n, block_hash: b + }, + sig: sig_of_id(r) + }) + + // This is a simple version that initializes everything to the "genesis" state, + // choosing only the leader function non-deterministically. + // In general, we should start in an arbitrary replica_state that + // satisfies certain conditions. + // TODO: in the future, we will run the system in an intermediate state: + // https://github.com/dnkolegov/chonkybft-spec/issues/23 + action init = { + // non-deterministically choose the leader function + nondet ldr = VIEWS.setOfMaps(REPLICAS.map(id => sig_of_id(id))).oneOf() + init_view_1_with_leader(ldr) + } + + action init_view_0_with_leader(ldr: ViewNumber -> ReplicaKey): bool = all { + replica_state' = CORRECT.mapBy(id => { + view: 0, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE, + high_timeout_qc: HIGH_TIMEOUT_QC_NONE, + phase: PhaseTimeout, + cached_proposals: Set(), + committed_blocks: [], + }), + replica_view' = CORRECT.mapBy(id => 0), + proposer_view' = CORRECT.mapBy(id => 0), + // all correct replicas send TimeoutVote as their first step + msgs_signed_timeout' = CORRECT.map(id => { + vote: { view: 0, high_vote: COMMIT_VOTE_NONE, high_commit_qc: HIGH_COMMIT_QC_NONE }, + sig: sig_of_id(id) + }), + msgs_signed_commit' = Set(), + msgs_new_view' = Set(), + msgs_proposal' = Set(), + store_signed_timeout' = CORRECT.mapBy(id => Set()), + store_signed_commit' = CORRECT.mapBy(id => Set()), + ghost_justifications' = Map(), + leader' = ldr, + ghost_step' = InitStep, + }
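+ + // For example, with VIEWS = 0.to(5) and six replicas, `ldr` in `init` ranges over + // all functions from view numbers to replica keys; a round-robin assignment such as + // Map(0->"n0", 1->"n1", 2->"n2", 3->"n3", 4->"n4", 5->"n5") is one admissible choice.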
+ + // start from a bootstrapped state in view 1 + action init_view_1_with_leader(ldr: ViewNumber -> ReplicaKey): bool = { + // An initial quorum certificate. + // We use all the correct + faulty replicas. Otherwise, when a quorum + // of correct replicas is impossible, e.g., the actual number of faults is above F, + // we cannot reach this state. + pure val init_timeout_qc: TimeoutQC = { + votes: REPLICAS.map(id => sig_of_id(id)).mapBy(_ => { + view: 0, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }), + agg_sig: REPLICAS.map(id => sig_of_id(id)), + ghost_view: 0, + } + pure val timeout_votes0 = REPLICAS.map(id => sig_of_id(id)).map(sig => { + sig: sig, + vote: { view: 0, high_vote: COMMIT_VOTE_NONE, high_commit_qc: HIGH_COMMIT_QC_NONE}, + }) + all { + replica_state' = CORRECT.mapBy(id => { + view: 1, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE, + high_timeout_qc: Some(init_timeout_qc), + phase: PhasePrepare, + cached_proposals: Set(), + committed_blocks: [], + }), + replica_view' = CORRECT.mapBy(id => 0), + proposer_view' = CORRECT.mapBy(id => 0), + // all replicas have sent TimeoutVote in view 0 + msgs_signed_timeout' = timeout_votes0, + msgs_new_view' = CORRECT.map(id => { + { sig: sig_of_id(id), justification: Timeout(init_timeout_qc) } + }), + msgs_signed_commit' = Set(), + msgs_proposal' = Set(), + store_signed_timeout' = CORRECT.mapBy(id => { + // the leader stores all timeout votes, the others receive NewView + if (sig_of_id(id) == ldr.get(0)) timeout_votes0 else Set() + }), + store_signed_commit' = CORRECT.mapBy(id => Set()), + // all correct replicas have Timeout(init_timeout_qc) + ghost_justifications' = + tuples(CORRECT, Set(1)).mapBy(((id, view)) => Timeout(init_timeout_qc)), + leader' = ldr, + ghost_step' = InitStep, + } + } + + // a single system step, including correct replicas & proposers, and faulty replicas & proposers + action step = { + any { + correct_step, + all { + faulty_step, + unchanged_replica, + leader' = leader, + } + } + } + + // a single step + the fetcher algorithm + action step_with_fetcher = { + any { + step, + all { + nondet id = oneOf(CORRECT) + fetcher_step(id), + leader' = leader, + }, + } + } + + // same as step but correct replicas never timeout + action step_no_timeout = { + any { + nondet id = oneOf(CORRECT) + any { + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + replica_step_no_timeout(id), + leader' = leader, + }, + all { + // Non-deterministically choose the next block, use it only for the case of None below. + nondet new_block = oneOf(VALID_BLOCKS) + proposer_step(id, new_block), + replica_view' = replica_view, + leader' = leader, + }, + }, + all { + faulty_step, + unchanged_replica, + leader' = leader, + } + } + } + + // same as step but no faulty replicas (all FAULTY replicas are halted) + action correct_step = { + nondet id = oneOf(CORRECT) + any { + all { + replica_view' = replica_view.set(id, replica_state.get(id).view), + replica_step(id), + leader' = leader, + }, + all { + // Non-deterministically choose the next block, use it only for the case of None below.
+ nondet new_block = oneOf(VALID_BLOCKS) + proposer_step(id, new_block), + replica_view' = replica_view, + leader' = leader, + }, + } + } + + // a single step by multiple Byzantine replicas + action faulty_step: bool = all { + FAULTY != Set(), + all { + nondet senders = FAULTY.powerset().oneOf() + nondet commit_view = VIEWS.oneOf() + nondet block_hash = ALL_BLOCKS.oneOf() + nondet block_number = VIEWS.oneOf() + val signed_commits = senders.map(s => { + vote:{ + view: commit_view, block_number: block_number, block_hash: block_hash + }, + sig: s + }) + msgs_signed_commit' = msgs_signed_commit.union(signed_commits), + }, + all { + // non-deterministically produce a timeout vote + nondet msg_view = VIEWS.oneOf() + nondet sig = oneOf(FAULTY) + nondet commit_vote: SignedCommitVote = oneOf(msgs_signed_commit.union(ALL_FAULTY_SIGNED_COMMIT_VOTES)) + nondet commit_qc_view = oneOf(VIEWS) + nondet commit_qc_block_number = oneOf(VIEWS) + nondet commit_qc_block_hash = oneOf(ALL_BLOCKS) + nondet commit_qc_agg_sig = REPLICAS.powerset().oneOf() + val commit_qc_vote: CommitVote = { + view: commit_qc_view, + block_number: commit_qc_block_number, + block_hash: commit_qc_block_hash + } + val commit_qc = { + vote: commit_qc_vote, + agg_sig: commit_qc_agg_sig + } + nondet has_high_vote = oneOf(Bool) + nondet has_commit_qc = oneOf(Bool) + val signed_timeout_vote = { + vote: { + view: msg_view, + high_vote: if (has_high_vote) Some(commit_vote.vote) else COMMIT_VOTE_NONE, + high_commit_qc: if (has_commit_qc) Some(commit_qc) else HIGH_COMMIT_QC_NONE, + }, + sig: sig, + } + all { + // faulty replicas cannot forge the signatures of the correct replicas, enforce that + commit_qc_agg_sig.forall(id => or { + FAULTY.contains(id), + msgs_signed_commit.exists(sv => sv.sig == id and sv.vote == commit_qc_vote), + }), + // add the message by a faulty replica + msgs_signed_timeout' = msgs_signed_timeout.union(Set(signed_timeout_vote)), + } + + }, + all { + nondet msg_view = VIEWS.oneOf() + nondet sig = oneOf(FAULTY) + nondet justification_case = Set("commit", "timeout").oneOf() + // introduce a commit qc + nondet commit_qc_view = oneOf(VIEWS) + nondet commit_qc_block_number = oneOf(VIEWS) + nondet commit_qc_block_hash = oneOf(ALL_BLOCKS) + nondet commit_qc_agg_sig = REPLICAS.powerset().oneOf() + val commit_qc_vote: CommitVote = { + view: commit_qc_view, block_number: commit_qc_block_number, + block_hash: commit_qc_block_hash + } + val commit_qc = { + vote: commit_qc_vote, agg_sig: commit_qc_agg_sig, + } + // introduce a timeout qc + nondet timeout_qc_view = VIEWS.oneOf() + nondet timeout_qc_voters = REPLICAS.powerset().oneOf() + nondet timeout_qc_agg_sig = timeout_qc_voters.powerset().oneOf() + nondet timeout_qc_votes = + timeout_qc_voters.setOfMaps(msgs_signed_timeout.map(sv => sv.vote)).oneOf() + val timeout_qc: TimeoutQC = { + votes: timeout_qc_votes, + agg_sig: timeout_qc_agg_sig, + ghost_view: timeout_qc_view, + } + // make new view + val new_view: NewView = { + justification: + if (justification_case == "commit") { + Commit(commit_qc) + } else { + Timeout(timeout_qc) + }, + sig: sig, + } + all { + // faulty replicas cannot forge the signatures of the correct replicas, enforce that + commit_qc_agg_sig.forall(id => or { + FAULTY.contains(id), + msgs_signed_commit.exists(sv => sv.sig == id and sv.vote == commit_qc_vote), + }), + timeout_qc_agg_sig.forall(id => or { + FAULTY.contains(id), + msgs_signed_timeout.exists(sv => sv.sig == id and sv.vote == timeout_qc_votes.get(id)), + }), + // send a new view + msgs_new_view' = 
msgs_new_view.union(Set(new_view)), + } + }, + all { + nondet msg_view = VIEWS.oneOf() + nondet sig = oneOf(FAULTY) + nondet justification_case = Set("commit", "timeout").oneOf() + // introduce a commit qc + nondet commit_qc_view = oneOf(VIEWS) + nondet commit_qc_block_number = oneOf(VIEWS) + nondet commit_qc_block_hash = oneOf(ALL_BLOCKS) + nondet commit_qc_agg_sig = REPLICAS.powerset().oneOf() + val commit_qc_vote: CommitVote = { + view: commit_qc_view, block_number: commit_qc_block_number, + block_hash: commit_qc_block_hash + } + val commit_qc = { + vote: commit_qc_vote, agg_sig: commit_qc_agg_sig, + } + // introduce a timeout qc + nondet timeout_qc_view = VIEWS.oneOf() + nondet timeout_qc_voters = REPLICAS.powerset().oneOf() + nondet timeout_qc_agg_sig = timeout_qc_voters.powerset().oneOf() + nondet timeout_qc_votes = + timeout_qc_voters.setOfMaps(msgs_signed_timeout.map(sv => sv.vote)).oneOf() + val timeout_qc: TimeoutQC = { + votes: timeout_qc_votes, + agg_sig: timeout_qc_agg_sig, + ghost_view: timeout_qc_view, + } + // non-deterministically select a block + nondet block = ALL_BLOCKS.oneOf() + nondet block_number = VIEWS.oneOf() + nondet has_block = oneOf(Bool) + // make new proposal + val proposal: Proposal = { + block: if (has_block) Some(block) else None, + justification: + if (justification_case == "commit") { + Commit(commit_qc) + } else { + Timeout(timeout_qc) + }, + sig: sig, + ghost_block_number: block_number, + } + all { + // faulty replicas cannot forge the signatures of the correct replicas, enforce that + commit_qc_agg_sig.forall(id => or { + FAULTY.contains(id), + msgs_signed_commit.exists(sv => sv.sig == id and sv.vote == commit_qc_vote), + }), + timeout_qc_agg_sig.forall(id => or { + FAULTY.contains(id), + msgs_signed_timeout.exists(sv => sv.sig == id and sv.vote == timeout_qc_votes.get(id)), + }), + msgs_proposal' = msgs_proposal.union(Set(proposal)), + }, + }, + ghost_step' = FaultyStep, + } + + // a step by one of the replicas without timeouts + action replica_step_no_timeout(id: ReplicaId):bool = all{ + any { + all { + msgs_signed_commit != Set(), + nondet signed_vote = oneOf(msgs_signed_commit) + on_commit(id, signed_vote), + }, + all { + msgs_signed_timeout != Set(), + nondet vote = oneOf(msgs_signed_timeout) + on_timeout(id, vote), + }, + all { + msgs_proposal != Set(), + nondet proposal = oneOf(msgs_proposal) + on_proposal(id, proposal), + }, + all { + msgs_new_view != Set(), + nondet new_view = oneOf(msgs_new_view) + on_new_view(id, new_view), + } + }, + } + + // a step by one of the correct replicas + action replica_step(id: ReplicaId):bool = all{ + any { + on_timer_is_finished(id), + replica_step_no_timeout(id), + }, + } + + action on_proposal(id: ReplicaId, proposal: Proposal): bool = all { + val self = replica_state.get(id) + // Get the proposal view. + val proposal_view = proposal.justification.view() + all { + // We only allow proposals for the current view if we have not voted in it yet. + or { + proposal_view == self.view and self.phase == PhasePrepare, + proposal_view > self.view + }, + // since our views are bounded, we ignore out of bounds views + VIEWS.contains(proposal_view), + // We ignore proposals from the wrong leader. + proposal.sig == leader.get(proposal_view), + // Check that the proposal is valid. + proposal.proposal_verify(), + // Get the implied block number and hash (if any).
+ val block_number_and_opt_block_hash = proposal.justification.get_implied_block() + val block_number = block_number_and_opt_block_hash._1 + val opt_block_hash = block_number_and_opt_block_hash._2 + // Vote only if you have collected all committed blocks so far. + // (See a check in all {...} below.) + + // Check if this is a reproposal or not, and do the necessary checks. + // As a side result, get the correct block hash. + val block_hash: BlockHash = match(opt_block_hash) { + | Some(hash) => { + // This is a reproposal. We let the leader repropose blocks without sending + // them in the proposal (it sends only the number + hash). That allows a + // leader to repropose a block without having it stored. + // It is an optimization that allows us to not wait for a leader that has + // the previous proposal stored (which can take 4f views), and to somewhat + // speed up reproposals by skipping block broadcast. + // This only saves time because we have a gossip network running in parallel, + // and any time a replica is able to create a finalized block (by possessing + // both the block and the commit QC) it broadcasts the finalized block (this + // was meant to propagate the block to full nodes, but of course validators + // will end up receiving it as well). + // However, this can be difficult to model and we might want to just + // ignore the gossip network in the formal model. We will still have liveness + // but in the model we'll end up waiting 4f views to get a leader that has the + // previous block before proposing a new one. This is not that bad, since + // then we can be sure that the consensus will continue even if the gossip + // network is failing for some reason. + + // See validation below. + hash + } + + | None => { + // See state verification below. + unwrap_or(proposal.block, "").hash() + } + } + + // the replica vote to send + val vote: CommitVote = { + view: proposal_view, + block_number: block_number, + block_hash: block_hash, + } + + all { + // Vote only if you have collected all committed blocks so far (see a check in all {...}). + block_number == length(self.committed_blocks), + // Validation. + if (is_some(opt_block_hash)) { + // For sanity reasons, we'll check that there's no block in the proposal. + // But this check should be completely unnecessary (in theory at least). + is_none(proposal.block) + } else { + all { + // This is a new proposal, so we need to verify it (i.e. execute it). + is_some(proposal.block), + // To verify the block, replica just tries to apply it to the current + // state. Current state is the result of applying all the committed blocks until now. + proposal.block.block_verify(block_number) + // We cache the new proposals, waiting for them to be committed. + // See the update to cached_proposals below. + } + }, + // Update the replica state + val self1 = { + ...self, + view: proposal_view, + phase: PhaseCommit, + high_vote: Some(vote), + cached_proposals: + if (is_none(proposal.block)) { + self.cached_proposals + } else { + // the implementation caches the block, we simply mark its number + hash + self.cached_proposals.union(Set((block_number, block_hash))) + } + } + val self2 = + match (proposal.justification) { + | Commit(qc) => self1.process_commit_qc(Some(qc)) + | Timeout(qc) => { + ...self1.process_commit_qc(qc.high_qc()), + high_timeout_qc: max_timeout_qc(Some(qc), self1.high_timeout_qc) + } + } + + replica_state' = replica_state.set(id, self2), + // Send the commit vote to all replicas (including ourselves). 
+ msgs_signed_commit' = msgs_signed_commit.union(Set({ vote: vote, sig: sig_of_id(id) })), + // Record the justification. + ghost_justifications' = + ghost_justifications.put((id, proposal.justification.view()), proposal.justification), + // unchanged variables + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, store_signed_timeout' = store_signed_timeout, + store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, + ghost_step' = OnProposalStep(id), + } + } + } + + // a replica receives SignedCommitVote + action on_commit(id: ReplicaId, signed_vote: SignedCommitVote): bool = all { + val self = replica_state.get(id) + all { + // if the vote isn't current, just ignore it. + signed_vote.vote.view >= self.view, + // Check that the signed vote is valid. + signed_commit_vote_verify(signed_vote), + + // Store the vote. We will never store duplicate (same view and sender) votes. + is_new_signed_commit_vote_for_replica(id, signed_vote), + val new_store = store_signed_commit.get(id).union(Set(signed_vote)) + + all { + store_signed_commit' = store_signed_commit.set(id, new_store), + // Check if we now have a commit QC for this view. + val qc_opt = get_commit_qc(new_store, signed_vote) + match (qc_opt) { + | None => all { + replica_state' = replica_state, + msgs_new_view' = msgs_new_view, + ghost_justifications' = ghost_justifications, + } + + | Some(qc) => { + val self1 = self.process_commit_qc(Some(qc)) + start_new_view(id, self1, signed_vote.vote.view + 1, Commit(qc)) + } + }, + + msgs_signed_commit' = msgs_signed_commit, + msgs_proposal' = msgs_proposal, msgs_signed_timeout' = msgs_signed_timeout, + store_signed_timeout' = store_signed_timeout, + proposer_view' = proposer_view, + ghost_step' = OnCommitStep(id), + } + } + } + + // If this is the first view, we immediately timeout. This will force the replicas + // to synchronize right at the beginning and will provide a justification for the + // proposer at view 1. + // Currently, this is completely non-deterministic, that is, + // this action can be triggered at any step + // TODO: currently, timers are non-deterministic, model them as integers? + // https://github.com/dnkolegov/chonkybft-spec/issues/25 + action on_timer_is_finished(id: ReplicaId): bool = { + val self = replica_state.get(id) + val vote: SignedTimeoutVote = { + vote: { + view: self.view, + high_vote: self.high_vote, + high_commit_qc: self.high_commit_qc, + }, + sig: sig_of_id(id), + } + all { + // If we have already timed out, we don't need to send another timeout vote. + self.phase != PhaseTimeout, + replica_state' = replica_state.set(id, { ...self, phase: PhaseTimeout }), + msgs_signed_timeout' = msgs_signed_timeout.union(Set(vote)), + // unchanged variables + msgs_signed_commit' = msgs_signed_commit, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, store_signed_timeout' = store_signed_timeout, + store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = OnTimerIsFinishedStep(id), + } + } + + // a replica receives SignedTimeoutVote + action on_timeout(id: ReplicaId, signed_vote: SignedTimeoutVote): bool = all { + val self = replica_state.get(id) + all { + // If the vote isn't current, just ignore it + signed_vote.vote.view >= self.view, + // Check that the signed vote is valid. + signed_vote.signed_timeout_vote_verify(), + // Store the vote. We will never store duplicate (same view and sender) votes. 
+ is_new_signed_timeout_vote_for_replica(id, signed_vote), + val new_store = store_signed_timeout.get(id).union(Set(signed_vote)) + + all { + store_signed_timeout' = store_signed_timeout.set(id, new_store), + // Check if we now have a timeout QC for this view. + val qc_opt = get_timeout_qc(new_store, signed_vote.vote.view) + match (qc_opt) { + | None => all { + replica_state' = replica_state, + msgs_new_view' = msgs_new_view, + ghost_justifications' = ghost_justifications, + } + + | Some(qc) => all { + val self1 = { + ...self.process_commit_qc(qc.high_qc()), + high_timeout_qc: max_timeout_qc(qc_opt, self.high_timeout_qc) + } + start_new_view(id, self1, signed_vote.vote.view + 1, Timeout(qc)), + } + }, + // unchanged variables + msgs_signed_timeout' = msgs_signed_timeout, msgs_signed_commit' = msgs_signed_commit, + msgs_proposal' = msgs_proposal, proposer_view' = proposer_view, + store_signed_commit' = store_signed_commit, + ghost_step' = OnTimeoutStep(id), + } + } + + // a replica receives NewView + action on_new_view(id: ReplicaId, new_view: NewView): bool = all { + val self = replica_state.get(id) + val new_view_view = new_view.justification.view() + all { + // If the message isn't current, just ignore it. + new_view_view >= self.view, + // Check that the new view is valid. + new_view.new_view_verify(), + // Update our state. + val self1 = + match (new_view.justification) { + | Commit(qc) => self.process_commit_qc(Some(qc)) + + | Timeout(qc) => { + ...self.process_commit_qc(qc.high_qc()), + high_timeout_qc: max_timeout_qc(Some(qc), self.high_timeout_qc) + } + } + + if (new_view_view > self.view) { + start_new_view(id, self1, new_view_view, new_view.justification) + } else { + all { + replica_state' = replica_state, + msgs_new_view' = msgs_new_view, + ghost_justifications' = ghost_justifications, + } + }, + // unchanged variables + msgs_signed_timeout' = msgs_signed_timeout, msgs_signed_commit' = msgs_signed_commit, + msgs_proposal' = msgs_proposal, store_signed_timeout' = store_signed_timeout, + store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, + ghost_step' = OnNewViewStep({ id: id, view: new_view_view }), + } + } + + // A step by the proposer for replica_id. + // We pass new_block, which is used as a substitute for: self.create_proposal(block_number). + action proposer_step(replica_id: ReplicaId, new_block: Block): bool = all { + val replica_state_view = replica_state.get(replica_id).view + val proposer_state_view = proposer_view.get(replica_id) + all { + // Check if we are in a new view and we are the leader. + // Note that by definition, it's impossible to be the leader for view 0. + // We always let view 0 timeout. + // This code only works for a non-faulty proposer. + CORRECT.contains(replica_id), + proposer_state_view < replica_state_view, + sig_of_id(replica_id) == leader.get(replica_state_view), + proposer_view' = proposer_view.set(replica_id, replica_state_view), + // Get the justification for this view. If we have both a commit QC + // and a timeout QC for this view (highly unlikely), we should prefer + // to use the commit QC. + val justification = ghost_justifications.get((replica_id, replica_state_view)) + // Get the block number and check if this must be a reproposal. + val block_and_hash = justification.get_implied_block() + val block_number = block_and_hash._1 + val opt_block_hash = block_and_hash._2 + // Propose only if you have collected all committed blocks so far: see in all {...} + // Now we create the block.
+ val block = + match (opt_block_hash) { + | Some(h) => + // There was some proposal last view that at least SUBQUORUM_WEIGHT replicas + // voted for and could have been finalized. We need to repropose it. + None + | None => + // The previous proposal was finalized, so we can propose a new block. + // If we don't have the previous block, this call will fail. This is + // fine as we can just not propose anything and let our turn end. + // (see the check below) + Some(new_block) + } + + // Create the proposal message and send it to all replicas + val proposal: Proposal = { + block: block, + justification: justification, + sig: sig_of_id(replica_id), + ghost_block_number: block_number, + } + all { + // Propose only if you have collected all committed blocks so far. + val committed_blocks = replica_state.get(replica_id).committed_blocks + block_number == length(committed_blocks), + // check that we have received the previous block, if we are proposing a new one + or { + is_none(block), + block_number == 0, + replica_state.get(replica_id).cached_proposals.exists(((bn, bh)) => { + block_number == bn + 1 + }), + }, + // send the proposal + msgs_proposal' = msgs_proposal.union(Set(proposal)), + }, + // unchanged variables + replica_state' = replica_state, msgs_signed_timeout' = msgs_signed_timeout, + msgs_signed_commit' = msgs_signed_commit, msgs_new_view' = msgs_new_view, + store_signed_timeout' = store_signed_timeout, ghost_justifications' = ghost_justifications, + store_signed_commit' = store_signed_commit, + ghost_step' = ProposerStep(replica_id), + } + } + + // a correct replica changes to a new view + action start_new_view(id: ReplicaId, self: ReplicaState, view: ViewNumber, justification: Justification): bool = { + // make sure that we do not go out of bounds with the new view + all { + VIEWS.contains(view), + // switch to the Prepare phase + replica_state' = replica_state.set(id, { ...self, view: view, phase: PhasePrepare }), + // Send a new view message to the other replicas for synchronization + msgs_new_view' = msgs_new_view.union(Set({ + justification: justification, + sig: sig_of_id(id) + })), + // store the justification + ghost_justifications' = ghost_justifications.put((id, view), justification) + } + } + + // fetch a missing block from the peers (executed concurrently with the proposer and replica) + action fetcher_step(id: ReplicaId): bool = { + val self = replica_state.get(id) + match (self.high_commit_qc) { + | None => + all { + // do nothing + false, + unchanged_replica, + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + ghost_step' = ghost_step, + } + + | Some(qc) => + val next_block_number = self.committed_blocks.length() + all { + qc.vote.block_number >= next_block_number, + // query other replicas whether they have a CommittedBlock + // with the given number, and fetch it from them. The fetched block and + // its commit_qc are verified locally. 
+ any { + // fetch the block from a correct replica + nondet other_id = oneOf(CORRECT) + val other = replica_state.get(other_id) + all { + other.committed_blocks.length() > next_block_number, + val fetched_block = other.committed_blocks[next_block_number] + val new_self = { + ...self, + committed_blocks: self.committed_blocks.append(fetched_block) + } + replica_state' = replica_state.set(id, new_self) + }, + // fetch the block from a faulty replica + // introduce a commit qc + nondet commit_qc_view = oneOf(VIEWS) + nondet commit_qc_block_number = oneOf(VIEWS) + nondet commit_qc_block = oneOf(ALL_BLOCKS) + nondet commit_qc_agg_sig = REPLICAS.powerset().oneOf() + val commit_qc_vote: CommitVote = { + view: commit_qc_view, block_number: commit_qc_block_number, + block_hash: commit_qc_block + } + val commit_qc = { vote: commit_qc_vote, agg_sig: commit_qc_agg_sig } + val fetched_block: CommittedBlock = { commit_qc: qc, block: commit_qc_block } + // verify the block + all { + FAULTY != Set(), + // faulty replicas cannot forge the signatures of the correct replicas, enforce that + commit_qc_agg_sig.forall(id => or { + FAULTY.contains(id), + msgs_signed_commit.exists(sv => sv.sig == id and sv.vote == commit_qc_vote), + }), + // the replica verifies the received commit_qc + commit_qc_verify(commit_qc), + // store the received block + val new_self = { + ...self, + committed_blocks: self.committed_blocks.append(fetched_block) + } + replica_state' = replica_state.set(id, new_self), + } + }, + ghost_step' = FetchStep(id), + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, replica_view' = replica_view, + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + } + } + } + + // part of the state that is not touched by the Byzantine validators + action unchanged_replica = all { + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + replica_view' = replica_view, replica_state' = replica_state, + } + + // ------------------------------------------------------------------------- + // Auxiliary definitions + + def block_verify(block: Option[Block], implied_block_number: BlockNumber): bool = { + // Validation is application-specific. + // For specification purposes, we check validity via VALID_BLOCKS. + match (block) { + | Some(blk) => and { + VALID_BLOCKS.contains(blk), + // The implementation should check that the implied block number + // belongs to the data blob passed in block, as it can interpret the block. + // The specification does not make such a connection. + // The relation between the block numbers and committed blocks + // is checked in on_proposal. + implied_block_number >= 0, + } + | None => false + } + }
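+ + // For example, in the experiment instances above, block_verify(Some("val_b0"), 0) holds, + // while block_verify(Some("inv_b3"), 0) and block_verify(None, 0) do not.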
+ + // Processes an (already verified) commit_qc received from the network + // as part of some message. It bumps the local high_commit_qc and if + // we have the proposal corresponding to this qc, we append it to the committed_blocks. + def process_commit_qc(self: ReplicaState, qc_opt: Option[CommitQC]): ReplicaState = { + match (qc_opt) { + | None => self + + | Some(qc) => + if (self.cached_proposals.contains((qc.vote.block_number, qc.vote.block_hash)) + and self.committed_blocks.length() == qc.vote.block_number) { + // NOTE: the implementation retrieves the block by its hash from cached_proposals. + // In the Quint spec, the hash and the block are identical, so we simply put the hash. + val block = qc.vote.block_hash + { + ...self, + committed_blocks: self.committed_blocks.append({ block: block, commit_qc: qc }), + high_commit_qc: max_commit_qc(Some(qc), self.high_commit_qc) + } + } else { + { + ...self, + high_commit_qc: max_commit_qc(Some(qc), self.high_commit_qc) + } + } + } + } + + // given two QCs, return the one with the largest view, + // or the left one, in case of the same view + def max_timeout_qc(qc1_opt: Option[TimeoutQC], qc2_opt: Option[TimeoutQC]): Option[TimeoutQC] = { + match (qc2_opt) { + | None => qc1_opt + | Some(qc2) => + match (qc1_opt) { + | None => qc2_opt + | Some(qc1) => + if (qc1.ghost_view >= qc2.ghost_view) qc1_opt else qc2_opt + } + } + } + + // given two QCs, return the one with the largest view, + // or the left one, in case of the same view + def max_commit_qc(qc1_opt: Option[CommitQC], qc2_opt: Option[CommitQC]): Option[CommitQC] = { + match (qc2_opt) { + | None => qc1_opt + | Some(qc2) => + match(qc1_opt) { + | None => qc2_opt + | Some(qc1) => + if (qc1.vote.view >= qc2.vote.view) qc1_opt else qc2_opt + } + } + } + + // If there is a quorum among the timeout votes for `view`, return `Some(qc)`, + // where qc is built from all stored timeout votes for that view. Otherwise, return None. + def get_timeout_qc(votes: Set[SignedTimeoutVote], view: ViewNumber): Option[TimeoutQC] = { + val view_signed_votes = votes.filter(v => v.vote.view == view) + if (view_signed_votes.size() < QUORUM_WEIGHT) { + None + } else { + // Importantly, view_signed_votes does not have two votes with the same signature. + // We check that in store_signed_timeout_all_inv. + val qc_votes: ReplicaKey -> TimeoutVote = + view_signed_votes.map(v => (v.sig, v.vote)).setToMap() + Some({ + votes: qc_votes, + agg_sig: view_signed_votes.map(v => v.sig), + ghost_view: view + }) + } + }
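+ + // For example, with six unit-weight replicas (QUORUM_WEIGHT == 5), five stored timeout + // votes for the same view produce Some(qc), while four produce None.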
+ + // If we have identical commit votes with at least QUORUM_WEIGHT + // from different replicas, we can create a commit quorum certificate locally, and return `Some(qc)`. + // Otherwise, return None. + def get_commit_qc(votes: Set[SignedCommitVote], last_signed_vote: SignedCommitVote): Option[CommitQC] = { + // group the votes by the same view, same block hash, and same block number + pure val similar_votes: Set[SignedCommitVote] = + votes.filter(v => and { + v.vote.view == last_signed_vote.vote.view, + v.vote.block_hash == last_signed_vote.vote.block_hash, + v.vote.block_number == last_signed_vote.vote.block_number, + }) + // count the votes for the block hash of vote + pure val votes_count = similar_votes.size() + if (votes_count < QUORUM_WEIGHT) { + None + } else { + // There is only one block hash that has a quorum. + // If this is not the case, one_commit_quorum is violated. + Some({ + vote: last_signed_vote.vote, + agg_sig: similar_votes.map(v => v.sig), + }) + } + } + + def justification_verify(justification: Justification): bool = { + match (justification) { + | Timeout(qc) => + qc.timeout_qc_verify() + + | Commit(qc) => + qc.commit_qc_verify() + } + } + + def proposal_verify(p: Proposal):bool = all { + p.justification.justification_verify(), + } + + // Get the view of a justification + pure def view(justification: Justification): ViewNumber = { + match (justification) { + | Commit(qc) => qc.vote.view + 1 + | Timeout(qc) => qc.ghost_view + 1 + } + } + + pure def new_view_verify(new_view: NewView): bool = all { + // Check that the justification is valid. + new_view.justification.justification_verify() + } + + // This function verifies an aggregated signature and checks it has enough weight. + // In here we need to not only check the signature, but also that + // it has enough weight behind it. + pure def aggregate_verify(agg_sig: AggregateSignature): bool = and { + agg_sig.size() >= QUORUM_WEIGHT, + agg_sig.subseteq(REPLICAS.map(id => sig_of_id(id))), + } + + pure def signed_timeout_vote_verify(signed_vote: SignedTimeoutVote): bool = all { + signed_vote.vote.view >= 0, + signed_vote.vote.high_commit_qc.option_has(qc => commit_qc_verify(qc)), + timeout_vote_verify(signed_vote.vote) + } + + pure def signed_commit_vote_verify(signed_vote: SignedCommitVote): bool = all { + commit_vote_verify(signed_vote.vote), + // we can do this check as a correct replica knows the public keys of the other replicas + REPLICAS.exists(id => sig_of_id(id) == signed_vote.sig), + } + + // Commit vote verification that a replica can do locally + pure def commit_vote_verify(vote: CommitVote): bool = all { + vote.view >= 0, + vote.block_number >= 0, + } + + // Timeout vote verification that a replica can do locally + // There are two invariants that are easy to check: + // self.view() >= self.high_vote.view() && self.high_vote.view() >= self.high_commit_qc.view() + pure def timeout_vote_verify(vote: TimeoutVote): bool = all { + vote.view >= 0, + match vote.high_vote { + | None => true + | Some(high_vote) => and { + commit_vote_verify(high_vote), + vote.view >= high_vote.view, + match vote.high_commit_qc { + | None => true + | Some(high_commit_qc) => high_vote.view >= high_commit_qc.vote.view + } + } + } + } + + // Commit QC verification that a replica can do locally + pure def commit_qc_verify(qc: CommitQC): bool = and { + commit_vote_verify(qc.vote), + // In here we need to not only check the signature, but also that + // it has enough weight behind it. + qc.agg_sig.aggregate_verify() + }
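+ + // For instance, with six unit-weight replicas, aggregate_verify above accepts any agg_sig + // that contains at least five distinct replica keys and no key outside REPLICAS.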
We don't need to verify the commit QCs + // of the other timeout votes, since they don't have any influence in the result. + match (qc.high_qc()) { + | None => true + | Some (high_qc) => high_qc.commit_qc_verify() + } + } + } + + def is_new_signed_commit_vote_for_replica(id: ReplicaId, signed_vote: SignedCommitVote):bool = { + store_signed_commit.get(id).forall(v => or { + v.vote.view != signed_vote.vote.view, + v.sig != signed_vote.sig, + }) + } + + def is_new_signed_timeout_vote_for_replica(id: ReplicaId, signed_vote: SignedTimeoutVote):bool = { + store_signed_timeout.get(id).forall(v => or { + v.vote.view != signed_vote.vote.view, + v.sig != signed_vote.sig + }) + } + + // This returns the block number that is implied by this justification. + // If the justification requires a block reproposal, it also returns + // the hash of the block that must be reproposed. + pure def get_implied_block(justification: Justification): (BlockNumber, Option[BlockHash]) = { + match (justification) { + | Commit(qc) => { + // The previous proposal was finalized, so we can propose a new block. + (qc.vote.block_number + 1, None) + } + + | Timeout(qc) => { + // Get the high vote of the timeout QC, if it exists. We check if there are + // timeout votes with at least an added weight of SUBQUORUM_WEIGHT, + // that have a high vote field for the same block. A QC can have + // 0, 1 or 2 such blocks. + // If there's only 1 such block, then we say the QC has a high vote. + // If there are 0 or 2 such blocks, we say the QC has no high vote. + pure val qc_high_vote: Option[CommitVote] = qc.high_vote() + // Get the high commit QC of the timeout QC. We compare the high QC field of + // all timeout votes in the QC, and get the highest one, if it exists. + match (qc_high_vote) { + | Some(hv) => + match (high_qc(qc)) { + | Some(hqc) => + if (hv.block_number > hqc.vote.block_number) { + // There was some proposal last view that might have been finalized. + // We need to repropose it. + (hv.block_number, Some(hv.block_hash)) + } else { + // Either the previous proposal was finalized or we know for certain + // that it couldn't have been finalized. Either way, we can propose + // a new block. + (hqc.vote.block_number + 1, None) + } + + | None => + // There was some proposal last view that might have been finalized. + // We need to repropose it. + (hv.block_number, Some(hv.block_hash)) + } + + | None => + // Either the previous proposal was finalized or we know for certain + // that it couldn't have been finalized. Either way, we can propose + // a new block. + match (high_qc(qc)) { + | Some(hqc) => (hqc.vote.block_number + 1, None) + | None => (0, None) + } + } + } + } + } + + // Get the high vote of the QC, if it exists. We check if there are + // timeout votes with at least an added weight of SUBQUORUM_WEIGHT, + // that have a high vote field for the same block. A QC can have + // 0, 1 or 2 such blocks. + // If there's only 1 such block, then we say the QC has a high vote. + // If there are 0 or 2 such blocks, we say the QC has no high vote. + pure def high_vote(qc: TimeoutQC): Option[CommitVote] = { + // find all kinds of high votes among the QC votes + pure val high_votes = qc.votes.keys().map(pk => qc.votes.get(pk).high_vote) + // compute the pair (high_vote, nr_of_votes) for each high_vote in high_votes + pure val high_votes_sizes = + high_votes.map(hv => (hv, qc.votes.keys().filter(pk => qc.votes.get(pk).high_vote == hv).size())) + // Select all high_vote values that go over the threshold. 
+ + // Get the high vote of the QC, if it exists. We check if there are + // timeout votes with at least an added weight of SUBQUORUM_WEIGHT + // that have a high vote field for the same block. A QC can have + // 0, 1 or 2 such blocks. + // If there's only 1 such block, then we say the QC has a high vote. + // If there are 0 or 2 such blocks, we say the QC has no high vote. + pure def high_vote(qc: TimeoutQC): Option[CommitVote] = { + // find all kinds of high votes among the QC votes + pure val high_votes = qc.votes.keys().map(pk => qc.votes.get(pk).high_vote) + // compute the pair (high_vote, nr_of_votes) for each high_vote in high_votes + pure val high_votes_sizes = + high_votes.map(hv => (hv, qc.votes.keys().filter(pk => qc.votes.get(pk).high_vote == hv).size())) + // Select all high_vote values that go over the threshold. + val subquorums = high_votes_sizes.fold([], (hv_list, hvs) => { + if (hvs._2 >= SUBQUORUM_WEIGHT) { + hv_list.append(hvs._1) + } else { + hv_list + } + }) + // if there is only one high_vote possible, return it + if (subquorums.length() == 1) { + subquorums[0] + } else { + None + } + } + + /// Get the high QC of the timeout QC. We compare the high QC field of all 4f+1 + /// timeout votes in the QC, and get the highest one. + pure def high_qc(qc: TimeoutQC): Option[CommitQC] = { + // Assumption: qc.votes does not have two different QCs for the same view. + qc.votes.keys().map(pk => qc.votes.get(pk)).fold(None, (max_qc_opt, v) => { + match (v.high_commit_qc) { + | None => max_qc_opt + | Some(hc_qc) => + // if max_qc is None, choose hc_qc, otherwise compare the views + match (max_qc_opt) { + | None => + Some(hc_qc) + | Some(max_qc) => + if (hc_qc.vote.view > max_qc.vote.view) Some(hc_qc) else max_qc_opt + } + } + }) + } + + // ------------------------------------------------------------------------- + // Invariants to check + + // No two correct replicas disagree on the committed blocks. + val agreement_inv = tuples(CORRECT, CORRECT).forall(((id1, id2)) => { + val blocks1 = replica_state.get(id1).committed_blocks + val blocks2 = replica_state.get(id2).committed_blocks + or { + // ignore this case, as there is a symmetric one + blocks1.length() > blocks2.length(), + // the shorter sequence is a prefix of the longer one + blocks1.indices().forall(i => blocks1[i].block == blocks2[i].block) + } + }) + + // every entry in committed_blocks must have a justification + val committed_blocks_have_justification_inv = { + CORRECT.forall(id => { + val committed_blocks = replica_state.get(id).committed_blocks + committed_blocks.indices().forall(block_number => { + val block_hash = committed_blocks[block_number].block + ghost_justifications.keys().exists(((id2, view)) => and { + id2 == id, + match (ghost_justifications.get((id2, view))) { + | Timeout(qc) => false + | Commit(qc) => and { + qc.vote.block_number == block_number, + qc.vote.block_hash == block_hash, + view == qc.vote.view or view == qc.vote.view + 1, + } + } + }) + }) + }) + } + + // a correct proposer should not equivocate for the same view + val no_proposal_equivocation_inv = tuples(msgs_proposal, msgs_proposal).forall(((m1, m2)) => or { + not(m1.justification.view() == m2.justification.view() and m1.sig == m2.sig), + FAULTY.exists(id => sig_of_id(id) == m1.sig), + m1.block == m2.block, + }) + + // a correct replica should not send two different commit votes in the same view + val no_commit_equivocation_inv = tuples(msgs_signed_commit, msgs_signed_commit).forall(((m1, m2)) => or { + not(m1.vote.view == m2.vote.view and m1.sig == m2.sig), + FAULTY.exists(id => sig_of_id(id) == m1.sig), + m1.vote.block_hash == m2.vote.block_hash and m1.vote.block_number == m2.vote.block_number, + }) + + // a correct replica should not send two different timeout votes in the same view + val no_timeout_equivocation_inv = tuples(msgs_signed_timeout, msgs_signed_timeout).forall(((m1, m2)) => or { + not(m1.vote.view == m2.vote.view and m1.sig == m2.sig), + FAULTY.exists(id => sig_of_id(id) == m1.sig), + m1.vote.high_vote == m2.vote.high_vote and m1.vote.high_commit_qc == m2.vote.high_commit_qc, + }) + + // a correct replica should not send two different new-view messages in the same view + val no_new_view_equivocation_inv = { + tuples(msgs_new_view, msgs_new_view).forall(((m1, m2)) => or { + FAULTY.exists(id => sig_of_id(id) == m1.sig or sig_of_id(id) == m2.sig), + m1.sig != m2.sig, + m1.justification.view() !=
m2.justification.view(), + m1.justification == m2.justification, + }) + } + + // Make sure that the commit votes in the store are well-formed. + val store_signed_commit_all_inv: bool = { + CORRECT.forall(id => { + val signed_votes = store_signed_commit.get(id) + signed_votes.forall(v1 => and { + or { + aux_commit_vote_inv(v1), + FAULTY.exists(id => sig_of_id(id) == v1.sig), + }, + signed_votes.forall(v2 => and { + // no duplicates + (v1.sig == v2.sig and v1.vote.view == v2.vote.view) implies (v1.vote == v2.vote), + }) + }) + }) + } + + // Make sure that the timeout votes in the store are well-formed. + val store_signed_timeout_all_inv: bool = { + CORRECT.forall(id => { + val signed_votes = store_signed_timeout.get(id) + signed_votes.forall(v1 => { + signed_votes.forall(v2 => and { + // no duplicates + (v1.sig == v2.sig and v1.vote.view == v2.vote.view) implies (v1.vote == v2.vote), + // the quorums have an intersection + match (v1.vote.high_commit_qc) { + | None => true + | Some(qc1) => + match (v2.vote.high_commit_qc) { + | None => true + | Some(qc2) => or { + qc1.vote.view != qc2.vote.view, + // TODO: use QUORUM_WEIGHT - F? + qc1.agg_sig.intersect(qc2.agg_sig).size() >= SUBQUORUM_WEIGHT, + } + } + } + }) + }) + }) + } + + // View change only on QC. + // Replicas should only change view when they receive a QC (commit or timeout) that justifies it. + // This invariant checks that every justification is verifiable, and that it does not come from the future. + val view_justification_inv = { + ghost_justifications.keys().forall(((id, v)) => and { + 1 <= v and v <= replica_state.get(id).view, + justification_verify(ghost_justifications.get((id, v))), + leader.keys().contains(v), + }) + } + + // If a correct replica reaches a view, then, collectively, the correct replicas + // have justifications for the views below. + val view_justification_continuous_inv = { + VIEWS.forall(v => or { + CORRECT.forall(id => replica_state.get(id).view < v), + ghost_justifications.keys().exists(((id, other_view)) => other_view == v) + }) + } + + // Checks that for every justification stored by a replica `r1`, + // the votes by the correct replicas are backed by sent messages. + val justification_is_supported_inv: bool = { + ghost_justifications.keys().forall(((pk1, replica_view)) => { + match (ghost_justifications.get((pk1, replica_view))) { + | Timeout(qc) => and { + replica_view > 0, + // Only check that quorums are supported by sent votes. + // The quorum invariants are checked in view_justification_inv. + qc.votes.keys().forall(pk2 => or { + FAULTY.exists(id => sig_of_id(id) == pk2), + val vote = qc.votes.get(pk2) + msgs_signed_timeout.exists(signed_vote => signed_vote.vote == vote) + }) + } + + | Commit(qc) => and { + replica_view > 0, + // Only check that quorums are supported by the sent votes. + // The quorum invariants are checked in view_justification_inv. 
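+          // For a commit QC it suffices that every signer either is faulty or
+          // has actually sent this very vote: all signatures in agg_sig are
+          // over the same vote, unlike a timeout QC, whose votes may differ
+          // per signer.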
+ qc.agg_sig.forall(pk2 => or { + FAULTY.map(id => sig_of_id(id)).contains(pk2), + msgs_signed_commit.exists(signed_vote => signed_vote.vote == qc.vote) + }) + } + } + }) + } + + // there are no two high votes supported by subquorum weight (2 * F + 1) in a single TimeoutQC + def one_high_vote_in_timeout_qc_inv: bool = { + def one_high_vote(qc: TimeoutQC): bool = { + pure val votes = qc.votes.keys().map(pk => qc.votes.get(pk)) + pure val high_votes = votes.map(v => v.high_vote) + // compute the pair (high_vote, nr_of_votes) for each high_vote in high_votes + pure val high_vote_count = + high_votes.mapBy(hv => votes.filter(v => v.high_vote == hv).size()) + tuples(high_votes, high_votes).forall(((hv1, hv2)) => or { + hv1 == hv2, + high_vote_count.get(hv1) < SUBQUORUM_WEIGHT, + high_vote_count.get(hv2) < SUBQUORUM_WEIGHT, + }) + } + + replica_state.keys() + .forall(id => replica_state.get(id).high_timeout_qc.option_has(one_high_vote)) + } + + // There are no two quorums for different block hashes in one view. + // Importantly, faulty replicas may send multiple votes with different block numbers. + def one_commit_quorum_inv: bool = { + def one_commit_quorum_per_view(fixed_view: ViewNumber): bool = { + val votes = msgs_signed_commit.filter(v => v.vote.view == fixed_view) + val hashes = votes.map(v => v.vote.block_hash) + val vote_count = + hashes.mapBy(h => votes.filter(v => v.vote.block_hash == h).map(v => v.sig).size()) + tuples(hashes, hashes).forall(((h1, h2)) => or { + h1 == h2, + vote_count.get(h1) < QUORUM_WEIGHT, + vote_count.get(h2) < QUORUM_WEIGHT, + }) + } + + msgs_signed_commit + .map(v => v.vote.view) + .forall(v => one_commit_quorum_per_view(v)) + } + + // integrity of high_commit_qc for all replicas + val all_replicas_high_commit_qc_inv: bool = { + CORRECT.forall(id => { + aux_commit_qc_inv(replica_state.get(id).high_commit_qc) + }) + } + + // basic integrity invariant for commit_qc + def aux_commit_qc_inv(qc_opt: Option[CommitQC]): bool = { + match (qc_opt) { + | None => true + | Some(qc) => and { + qc.vote.view >= 0, + qc.vote.block_number >= 0, + VALID_BLOCKS.contains(qc.vote.block_hash), + qc.agg_sig.aggregate_verify() + } + } + } + + // integrity of high_timeout_qc for all replicas + val all_replicas_high_timeout_qc_inv: bool = { + CORRECT.forall(id => { + aux_timeout_qc_inv(replica_state.get(id).high_timeout_qc) + }) + } + + // integrity invariant for TimeoutQC + def aux_timeout_qc_inv(qc_opt: Option[TimeoutQC]): bool = { + match (qc_opt) { + | None => true + | Some(qc) => + val signed_votes = qc.votes.keys().map(pk => {vote: qc.votes.get(pk), sig: pk }) + val votes = signed_votes.map(v => v.vote) + val public_keys = qc.votes.keys() + and { + qc.ghost_view >= 0, + qc.agg_sig.aggregate_verify(), + signed_votes.size() == qc.agg_sig.size(), + public_keys == qc.agg_sig, + signed_votes.forall(v => v.signed_timeout_vote_verify()), + // make sure that all votes are cast for the same view + votes.forall(v => qc.ghost_view == v.view), + // even though quorums in the votes may slightly differ, + // they must have enough votes in the intersection + signed_votes.forall(v1 => signed_votes.forall(v2 => { + match (v1.vote.high_commit_qc) { + | None => true + | Some(qc1) => + match (v2.vote.high_commit_qc) { + | None => true + | Some(qc2) => or { + qc1.vote.view != qc2.vote.view, + FAULTY.exists(id => sig_of_id(id) == v1.sig or sig_of_id(id) == v2.sig), + // TODO: use QUORUM_WEIGHT - F? 
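+              // Why an intersection is expected here: two aggregate signatures
+              // of weight at least QUORUM_WEIGHT out of TOTAL_WEIGHT signers
+              // overlap in weight at least 2 * QUORUM_WEIGHT - TOTAL_WEIGHT.
+              // E.g., for N = 6 and QUORUM_WEIGHT = 5 that is 4, which matches
+              // the tighter QUORUM_WEIGHT - F bound from the TODO above;
+              // SUBQUORUM_WEIGHT = 3 is the weaker bound checked here.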
+ qc1.agg_sig.intersect(qc2.agg_sig).size() >= SUBQUORUM_WEIGHT, + } + } + } + })) + } + } + } + + // a state invariant to guarantee that correct replicas do not send malformed timeout votes + def msgs_signed_timeout_inv: bool = { + msgs_signed_timeout.forall(v => { + CORRECT.exists(id => sig_of_id(id) == v.sig) implies signed_timeout_vote_verify(v) + }) + } + + // a state invariant to guarantee that correct replicas do not send malformed signed commit votes + def msgs_signed_commit_inv: bool = { + msgs_signed_commit.forall(v => or { + FAULTY.exists(id => sig_of_id(id) == v.sig), + aux_commit_vote_inv(v), + }) + } + + // basic integrity invariant for commit vote + def aux_commit_vote_inv(signed_vote: SignedCommitVote): bool = and { + signed_vote.vote.view >= 0, + signed_vote.vote.block_number >= 0, + REPLICAS.exists(id => sig_of_id(id) == signed_vote.sig), + VALID_BLOCKS.contains(signed_vote.vote.block_hash), + } + + // integrity of high_vote for all replicas + val all_replicas_high_vote_inv: bool = { + CORRECT.forall(id => { + val replica_high_vote = replica_state.get(id).high_vote + val replica_view = replica_state.get(id).view + match replica_high_vote { + | None => + // no commit votes sent so far + msgs_signed_commit.forall(sv => sv.sig != sig_of_id(id)) + + | Some(high_vote) => all { + // this vote is valid + aux_commit_vote_inv({vote: high_vote, sig: sig_of_id(id) }), + // this vote was signed and sent + { vote: high_vote, sig: sig_of_id(id) }.in(msgs_signed_commit), + // there is no higher signed commit + msgs_signed_commit.forall(sv => or { + sv.sig != sig_of_id(id), + sv.vote.view <= high_vote.view + }), + // views of the replica can't be lower than its `high_vote`'s view + replica_view >= high_vote.view, + match (replica_state.get(id).high_commit_qc) { + | None => true + | Some(qc) => high_vote.view >= qc.vote.view + } + } + } + }) + } + + // a high vote is the highest among the votes sent by a replica + val timeout_high_vote_is_highest_inv: bool = { + msgs_signed_timeout.forall(tv => or { + FAULTY.exists(id => sig_of_id(id) == tv.sig), + // get the commit messages below the view of the timeout vote + val commit_votes: Set[SignedCommitVote] = + msgs_signed_commit.filter(sv => and { + sv.sig == tv.sig, + sv.vote.view <= tv.vote.view, + }) + // find the highest among them, if any + val highest: Option[CommitVote] = + commit_votes + .fold(COMMIT_VOTE_NONE, (hv_opt, sv) => { + match (hv_opt) { + | None => Some(sv.vote) + | Some(hv) => if (hv.view > sv.vote.view) Some(hv) else Some(sv.vote) + } + }) + // it should be equal to high_vote + tv.vote.high_vote == highest + }) + } + + // make sure that a correct replica is voting for a block that can be retrieved + // from sufficiently many other correct replicas + val all_replicas_commit_is_backed_by_subquorum_inv: bool = { + msgs_signed_commit.forall(signed_vote => or { + // if signed_vote is sent by a faulty replica, ignore it + FAULTY.contains(signed_vote.sig), + // if signed_vote is sent by a correct replica, + // count the number of correct replicas that have the previous block + val pair = (signed_vote.vote.block_number, signed_vote.vote.block_hash) + val correct_with_block = CORRECT.filter(id => { + replica_state.get(id).cached_proposals.contains(pair) + }) + or { + // TODO: use SUBQUORUM_WEIGHT + correct_with_block.size() >= QUORUM_WEIGHT, + // the first block does not need a quorum + signed_vote.vote.block_number <= 1, + } + }) + } + + // All invariants + // TODO: add the latest invariant about quorum of votes for each proposed 
block
+  val all_invariants = all {
+    agreement_inv,
+    committed_blocks_have_justification_inv,
+    no_proposal_equivocation_inv,
+    no_commit_equivocation_inv,
+    no_timeout_equivocation_inv,
+    no_new_view_equivocation_inv,
+    store_signed_commit_all_inv,
+    store_signed_timeout_all_inv,
+    view_justification_inv,
+    justification_is_supported_inv,
+    one_high_vote_in_timeout_qc_inv,
+    one_commit_quorum_inv,
+    all_replicas_high_commit_qc_inv,
+    all_replicas_high_timeout_qc_inv,
+    msgs_signed_timeout_inv,
+    msgs_signed_commit_inv,
+    all_replicas_high_vote_inv,
+    timeout_high_vote_is_highest_inv,
+  }
+
+  // -------------------------------------------------------------------------
+  // Falsy invariants to check reachability of certain states
+
+  // check this invariant to see an example of reaching PhaseCommit
+  val phase_commit_example = CORRECT.forall(id => replica_state.get(id).phase != PhaseCommit)
+
+  // check this invariant to see an example of a non-empty timeout QC
+  val high_timeout_qc_example = CORRECT.forall(id => and {
+    match (replica_state.get(id).high_timeout_qc) {
+    | None => true
+    | Some(qc) => qc.votes.keys().size() == 0
+    }
+  })
+
+  // check this invariant to see an example of having a timeout quorum:
+  val timeout_qc_example = msgs_signed_timeout.map(m => (m.sig, m.vote.view)).size() < QUORUM_WEIGHT
+
+  // check this invariant to see an example of having at least QUORUM_WEIGHT messages sent for the same view
+  val commit_qc_example = VIEWS.forall(v => {
+    msgs_signed_commit.filter(signed_vote => signed_vote.vote.view == v).size() < QUORUM_WEIGHT
+  })
+
+  // Check this invariant to see an example of a justification.
+  // (Except view 0, which is just a bootstrapping view.)
+  val justification_example =
+    ghost_justifications.keys().exists(((id, v)) => v > 0)
+
+  // check this invariant to see an example of having a finalized block:
+  val one_block_example = CORRECT.forall(id => {
+    not(replica_state.get(id).committed_blocks.length() > 0)
+  })
+
+  // Check this invariant to see an example of two blocks being finalized by a single replica.
+  // This example is important, as finalizing just 1 block does not involve syncing.
+  val two_chained_blocks_example = CORRECT.forall(id => {
+    not(replica_state.get(id).committed_blocks.length() > 1)
+  })
+
+  // check this invariant to see an example of a high commit QC different from None
+  val high_commit_qc_example = CORRECT.forall(id => {
+    val self = replica_state.get(id)
+    is_none(self.high_commit_qc)
+  })
+
+  // check this invariant to see an example of a replica reaching a view > 2
+  val views_over_2_example = CORRECT.forall(id => {
+    replica_state.get(id).view <= 2
+  })
+}
\ No newline at end of file
diff --git a/spec/protocol-spec/state_monitor.qnt b/spec/protocol-spec/state_monitor.qnt
new file mode 100644
index 00000000..08f3f153
--- /dev/null
+++ b/spec/protocol-spec/state_monitor.qnt
@@ -0,0 +1,58 @@
+// A state monitor that allows us to compare the new state against the old one.
+// For example, we compare phase transitions.
+// To do so, we save the old replica states in old_replica_state.
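+//
+// One way to look for violations of phase_inv is the random simulator, e.g.
+// (assuming a standard Quint installation):
+//
+//   quint run --invariant=phase_inv state_monitor.qnt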
+module state_monitor { + import types.* from "./types" + import option.* from "./option" + + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4"), + FAULTY = Set("n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + N = 5, + F = 1, + VIEWS = 0.to(20), + VALID_BLOCKS = Set("val_b0", "val_b1", "val_b2"), + INVALID_BLOCKS = Set("inv_b3") + ) as i from "./replica" + + var old_replica_state: Option[ReplicaPubKey -> ReplicaState] + + action init = all { + i::init, + old_replica_state' = None, + } + + action step = all { + old_replica_state' = Some(i::replica_state), + i::step, + } + + // Check the phase transitions as a state invariant: + // Only two possible phase paths: + // Within the same view, a replica’s phase can only change through two paths. + // 1) Prepare→Commit→Timeout or 2) Prepare→Timeout. + val phase_inv = { + def for_replica(id: ReplicaPubKey): bool = { + match (old_replica_state) { + | None => + true + | Some(old) => + val same_view = (old.get(id).view == i::replica_state.get(id).view) + val old_phase = old.get(id).phase + val new_phase = i::replica_state.get(id).phase + if (same_view) or { + old_phase == new_phase, + old_phase == PhasePrepare and new_phase == PhaseCommit, + old_phase == PhaseCommit and new_phase == PhaseTimeout, + old_phase == PhasePrepare and new_phase == PhaseTimeout, + } else or { + new_phase == PhasePrepare, + old_phase == PhaseCommit or old_phase == PhaseTimeout + } + } + } + + i::CORRECT.forall(id => for_replica(id)) + } +} \ No newline at end of file diff --git a/spec/protocol-spec/tests/tests_n6f1b0.qnt b/spec/protocol-spec/tests/tests_n6f1b0.qnt new file mode 100644 index 00000000..ee9edd9e --- /dev/null +++ b/spec/protocol-spec/tests/tests_n6f1b0.qnt @@ -0,0 +1,379 @@ +// tests for the configuration of: N = 6, F = 1, and 0 Byzantine faults +module tests_n6f1b0 { + import types.* from "../types" + import defs.* from "../defs" + import option.* from "../option" + + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5"), + FAULTY = Set(), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1", "val_b2"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" + + // an initial quorum certificate + pure val init_timeout_qc: TimeoutQC = { + votes: CORRECT.mapBy(id => { view: 0, high_vote: COMMIT_VOTE_NONE, high_commit_qc: HIGH_COMMIT_QC_NONE}), + agg_sig: CORRECT, + ghost_view: 0, + } + + def replica_committed(id: ReplicaId): List[Block] = { + replica_state.get(id).committed_blocks.foldl([], (bs, cb) => bs.append(cb.block)) + } + + // The case of 6 correct replicas, but one of them being slow. + // This requires 56 steps. 
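+  // Every view-1 proposal below is justified by init_timeout_qc above: the
+  // replicas bootstrap by timing out in view 0, so the first leader carries a
+  // timeout justification instead of a commit QC for a previous block.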
+ run replicas_one_slow_Test = { + init_view_1_with_leader(Map(0 -> "n0", 1 -> "n0", 2 -> "n1", 3 -> "n2", 4 -> "n3")) + // the leader of view 0 proposes + .then(all { + proposer_step("n0", "val_b0"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // replicas process the propose message + .then(five_replicas_get_propose("val_b0", 0, "n0", Timeout(init_timeout_qc))) + .expect(all_invariants) + // the processes n0, n1, n2, n3, n4 finalize the block + .then(replica_commits("n0", "val_b0", 0, 1)) + .expect(all_invariants) + .then(replica_commits("n1", "val_b0", 0, 1)) + .expect(all_invariants) + .then(replica_commits("n2", "val_b0", 0, 1)) + .expect(all_invariants) + .then(replica_commits("n3", "val_b0", 0, 1)) + .expect(all_invariants) + .then(replica_commits("n4", "val_b0", 0, 1)) + .expect(all_invariants) + // the leader of view 1 proposes + .then(all { + proposer_step("n1", "val_b1"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // replicas process the propose message + .then(five_replicas_get_propose("val_b1", 1, "n1", + Commit({ + vote: { + view: 1, block_number: 0, block_hash: "val_b0", + }, + agg_sig: Set("n0", "n1", "n2", "n3", "n4") + }))) + .expect(all_invariants) + // the processes n0, n1, n2, n3, n4 finalize the block + .then(replica_commits("n0", "val_b1", 1, 2)) + .expect(all_invariants) + .then(replica_commits("n1", "val_b1", 1, 2)) + .expect(all_invariants) + .then(replica_commits("n2", "val_b1", 1, 2)) + .expect(all_invariants) + .then(replica_commits("n3", "val_b1", 1, 2)) + .expect(all_invariants) + .then(replica_commits("n4", "val_b1", 1, 2)) + .expect(all_invariants) + .then(all { + assert(replica_committed("n0") == ["val_b0", "val_b1"]), + assert(replica_committed("n1") == ["val_b0", "val_b1"]), + assert(replica_committed("n2") == ["val_b0", "val_b1"]), + assert(replica_committed("n3") == ["val_b0", "val_b1"]), + assert(replica_committed("n4") == ["val_b0", "val_b1"]), + unchanged_all, + }) + .expect(all_invariants) + // the slow replica n5 tries to catch up and receives the message for view 2 + .then(replica_commits("n5", "val_b1", 1, 2)) + .expect(all_invariants) + // Now the slow replica n5 has block number 1 but no block number 0. + // Moreover, n5 cannot receive block number 0 any longer. 
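+    // (In the full system, n5 would obtain block number 0 via block syncing,
+    // e.g., the fetcher of the informal spec; syncing is not modeled here.)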
+ .then(all { + assert(replica_state.get("n5").view == 3), + assert(replica_committed("n0") == ["val_b0", "val_b1"]), + assert(replica_committed("n1") == ["val_b0", "val_b1"]), + assert(replica_committed("n2") == ["val_b0", "val_b1"]), + assert(replica_committed("n3") == ["val_b0", "val_b1"]), + assert(replica_committed("n4") == ["val_b0", "val_b1"]), + // the slow replica does not finalize the block, even though it has committed it + assert(replica_committed("n5") == []), + all_invariants, + unchanged_all, + }) + } + + run fab_bad_scenario_Test = { + + val proposal_l5_v1_b0: Proposal = { + block: Some("val_b0"), + sig: "n5", + justification: Timeout(init_timeout_qc), + ghost_block_number: 0, + } + + val commit_vote_v1_b0: CommitVote = { + view: 1, + block_number: 0, + block_hash: "val_b0" + } + val timeout_qc_v1: TimeoutQC = { + votes: Map( + "n0" -> { view: 2, high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(commit_vote_v1_b0) }, + "n1" -> { view: 2, high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(commit_vote_v1_b0) }, + "n2" -> { view: 2, high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(commit_vote_v1_b0) }, + "n3" -> { view: 2, high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(commit_vote_v1_b0) }, + "n5" -> { view: 2, high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(commit_vote_v1_b0) } + ), + agg_sig: Set("n0", "n1", "n2", "n3", "n5"), + ghost_view: 2 + } + init_view_1_with_leader(Map(0 -> "n0", 1 -> "n5", 2 -> "n1", 3 -> "n2", 4 -> "n3", 5 -> "n4")) + // the leader proposes, all replicas in view=1 + .then(all { + replica_state.get("n0").view==1, unchanged_all + }) + .expect(all_invariants) + .then(all { + proposer_step("n5", "val_b0"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_proposal("n0", proposal_l5_v1_b0), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_proposal("n1", proposal_l5_v1_b0), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_proposal("n2", proposal_l5_v1_b0), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_proposal("n3", proposal_l5_v1_b0), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_proposal("n5", proposal_l5_v1_b0), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_commit("n1", {sig: "n0", vote: commit_vote_v1_b0}), + unchanged_leader_replica + }) + .expect(all_invariants) + .then(all { + on_commit("n1", {sig: "n1", vote: commit_vote_v1_b0}), + unchanged_leader_replica + }) + .expect(all_invariants) + .then(all { + on_commit("n1", {sig: "n2", vote: commit_vote_v1_b0}), + unchanged_leader_replica + }) + .expect(all_invariants) + .then(all { + on_commit("n1", {sig: "n3", vote: commit_vote_v1_b0}), + unchanged_leader_replica + }) + .expect(all_invariants) + .then(all { + on_commit("n1", {sig: "n5", vote: commit_vote_v1_b0}), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + replica_state.get("n0").view==1, unchanged_all + }) + .expect(all_invariants) + .then(all { + replica_state.get("n1").view==2, unchanged_all + }) + .expect(all_invariants) + // Delay all further messages until new view starts. 
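+    // Each on_timer_is_finished call makes a replica give up on its current
+    // view and broadcast a SignedTimeoutVote; once QUORUM_WEIGHT matching
+    // votes are collected (delivered below via
+    // replica_receives_all_timeout_votes), they form the TimeoutQC that
+    // justifies entering the next view.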
+ .then(all { + on_timer_is_finished("n0"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n2"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n3"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n4"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n5"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + replica_state.get("n0").view==1, unchanged_all + }) + .expect(all_invariants) + .then(all { + replica_state.get("n0").high_commit_qc==HIGH_COMMIT_QC_NONE, unchanged_all + }) + .expect(all_invariants) + .then(all { + replica_state.get("n0").high_vote==Some(commit_vote_v1_b0), unchanged_all + }) + .expect(all_invariants) + // replica n1 has high_vote and high_commit_qc + .then(all { + replica_state.get("n1").view==2, unchanged_all + }) + .expect(all_invariants) + .then(all { + replica_state.get("n1").high_vote==Some(commit_vote_v1_b0), unchanged_all + }) + .expect(all_invariants) + .then(all { + replica_state.get("n1").high_commit_qc==Some({ vote: commit_vote_v1_b0, agg_sig: Set("n0", "n1", "n2", "n3", "n5")}), unchanged_all + }) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n0", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n2", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n3", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n4", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n5", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(all { + proposer_step("n1", "val_b1"), + unchanged_leader_replica, + }) + } + + action all_replicas_get_propose(block: Block, block_number: BlockNumber, + leader: ReplicaId, justification: Justification): bool = { + val proposal: Proposal = { + block: Some(block), + sig: leader, + justification: justification, + ghost_block_number: block_number, + } + all { + on_proposal("n0", proposal), + unchanged_leader_replica, + }.then(all { + on_proposal("n1", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n2", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n3", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n4", proposal), + unchanged_leader_replica, + }) + } + + action replica_receives_all_timeout_votes(id: ReplicaId, high_vote: Option[CommitVote], high_commit_qc: Option[CommitQC], view: ViewNumber):bool = all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n0"} ), + unchanged_leader_replica, + } + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n1"} ), + unchanged_leader_replica, + }) + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n2"} ), + unchanged_leader_replica, + }) + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n3"} ), + unchanged_leader_replica, + }) + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n4"} ), + unchanged_leader_replica, + }) + + action 
five_replicas_get_propose(block: Block, block_number: BlockNumber, + leader: ReplicaId, justification: Justification): bool = { + pure val proposal = { + block: Some(block), + sig: leader, + justification: justification, + ghost_block_number: block_number + } + all { + on_proposal("n0", proposal), + unchanged_leader_replica, + }.then(all { + on_proposal("n1", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n2", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n3", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n4", proposal), + unchanged_leader_replica, + }) + } + + action replica_commits(id: ReplicaId, block_hash: BlockHash, block_number: BlockNumber, view: ViewNumber):bool = all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n0"}), + unchanged_leader_replica, + }.then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n1"}), + unchanged_leader_replica, + }).then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n2"}), + unchanged_leader_replica, + }).then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n3"}), + unchanged_leader_replica, + }).then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n4"}), + unchanged_leader_replica, + }) + + action unchanged_leader_replica = all { + leader' = leader, + replica_view' = replica_view, + } + + action unchanged_all = all { + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + ghost_step' = ghost_step, + } +} \ No newline at end of file diff --git a/spec/protocol-spec/tests/tests_n6f1b1.qnt b/spec/protocol-spec/tests/tests_n6f1b1.qnt new file mode 100644 index 00000000..49bcea9e --- /dev/null +++ b/spec/protocol-spec/tests/tests_n6f1b1.qnt @@ -0,0 +1,360 @@ +// tests for the configuration of: N = 6, F = 1, and 1 Byzantine faults +module tests { + import types.* from "../types" + import defs.* from "../defs" + import option.* from "../option" + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4"), + FAULTY = Set("n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1", "val_b2"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" + + // an initial quorum certificate from all replicas + val init_timeout_qc: TimeoutQC = { + votes: REPLICAS.mapBy(id => { view: 0, high_vote: None, high_commit_qc: None}), + agg_sig: REPLICAS, + ghost_view: 0, + } + + // an initial quorum certificate from the correct replicas + val init_timeout_corr_qc: TimeoutQC = { + votes: CORRECT.mapBy(id => { view: 0, high_vote: None, high_commit_qc: None}), + agg_sig: CORRECT, + ghost_view: 0, + } + + def replica_committed(id: ReplicaId): List[Block] = { + replica_state.get(id).committed_blocks.foldl([], (bs, cb) => bs.append(cb.block)) + } + + run replicas_bootstrap_in_view0_Test = { + pure val 
timeout_votes0 = CORRECT.map(id => { + sig: sig_of_id(id), + vote: { view: 0, high_vote: COMMIT_VOTE_NONE, high_commit_qc: HIGH_COMMIT_QC_NONE}, + }) + init_view_0_with_leader(Map(0 -> "n0", 1 -> "n0", 2 -> "n1", 3 -> "n2", 4 -> "n3")) + .then(replica_receives_all_timeout_votes("n0", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 0)) + .expect(all_invariants) + .then(replica_receives_new_view("n1", Timeout(init_timeout_corr_qc))) + .expect(all_invariants) + .then(replica_receives_new_view("n1", Timeout(init_timeout_corr_qc))) + .expect(all_invariants) + .then(replica_receives_new_view("n2", Timeout(init_timeout_corr_qc))) + .expect(all_invariants) + .then(replica_receives_new_view("n3", Timeout(init_timeout_corr_qc))) + .expect(all_invariants) + .then(replica_receives_new_view("n4", Timeout(init_timeout_corr_qc))) + .expect(all_invariants) + .then(all { + CORRECT.forall(id => { + val state = replica_state.get(id) + all { + assert(state.phase == PhasePrepare), + assert(state.high_timeout_qc == Some(init_timeout_corr_qc)), + assert(state.high_vote == COMMIT_VOTE_NONE), + assert(state.high_commit_qc == HIGH_COMMIT_QC_NONE), + } + }), + assert(replica_view == CORRECT.mapBy(_ => 0)), + assert(proposer_view == CORRECT.mapBy(_ => 0)), + assert(msgs_signed_timeout == timeout_votes0), + assert(msgs_new_view == CORRECT.map(id => { + { sig: id, justification: Timeout(init_timeout_corr_qc) } + })), + assert(msgs_signed_commit == Set()), + assert(msgs_proposal == Set()), + assert(store_signed_timeout == CORRECT.mapBy(id => { + if (id == "n0") timeout_votes0 else Set() + })), + assert(store_signed_commit == CORRECT.mapBy(id => Set())), + assert(CORRECT.forall(id => replica_state.get(id).cached_proposals == Set())), + assert(ghost_justifications == + tuples(CORRECT, Set(1)).mapBy(((id, view)) => Timeout(init_timeout_corr_qc))), + assert(CORRECT.forall(id => replica_state.get(id).committed_blocks == [])), + assert(all_replicas_high_timeout_qc_inv), + unchanged_all, + }) + .expect(all_invariants) + } + + run replicas_in_view1_Test = { + init_view_1_with_leader(Map(0 -> "n0", 1 -> "n0", 2 -> "n1", 3 -> "n2", 4 -> "n3")) + .then(all { + assert(all_replicas_high_timeout_qc_inv), + unchanged_all, + }) + .expect(all_invariants) + } + + // the leaders propose a block and all replicas commit that block + run replicas_normal_case_Test = { + init_view_1_with_leader(Map(0 -> "n0", 1 -> "n0", 2 -> "n1", 3 -> "n2", 4 -> "n3")) + // the leader proposes + .then(all { + proposer_step("n0", "val_b0"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // replicas process the propose message + .then(all_replicas_get_propose("val_b0", 0, "n0", Timeout(init_timeout_qc))) + .expect(all_invariants) + // n0 process the commit messages + .then(replica_commits("n0", "val_b0", 0, 1)) + .expect(all_invariants) + // n1 process the commit messages + .then(replica_commits("n1", "val_b0", 0, 1)) + .expect(all_invariants) + // n2 process the commit messages + .then(replica_commits("n2", "val_b0", 0, 1)) + .expect(all_invariants) + // n3 process the commit messages + .then(replica_commits("n3", "val_b0", 0, 1)) + .expect(all_invariants) + // n4 process the commit messages + .then(replica_commits("n4", "val_b0", 0, 1)) + .expect(all_invariants) + .then(all { + assert(replica_committed("n0") == ["val_b0"]), + assert(replica_committed("n1") == ["val_b0"]), + assert(replica_committed("n2") == ["val_b0"]), + assert(replica_committed("n3") == ["val_b0"]), + assert(replica_committed("n4") == ["val_b0"]), + unchanged_all, + }) + 
.expect(all_invariants) + // the new leader proposes + .then(all { + proposer_step("n1", "val_b1"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // replicas process the propose message + .then(all_replicas_get_propose("val_b1", 1, "n1", + Commit({ + vote:{view: 1, block_number: 0, block_hash: "val_b0"}, + agg_sig: Set("n0", "n1", "n2", "n3", "n4") + }))) + .expect(all_invariants) + // n0 process the commit messages + .then(replica_commits("n0", "val_b1", 1, 2)) + .expect(all_invariants) + // n1 process the commit messages + .then(replica_commits("n1", "val_b1", 1, 2)) + .expect(all_invariants) + // n2 process the commit messages + .then(replica_commits("n2", "val_b1", 1, 2)) + .expect(all_invariants) + // n3 process the commit messages + .then(replica_commits("n3", "val_b1", 1, 2)) + .expect(all_invariants) + // n4 process the commit messages + .then(replica_commits("n4", "val_b1", 1, 2)) + .expect(all_invariants) + .then(all { + assert(replica_committed("n0") == [ "val_b0", "val_b1" ]), + assert(replica_committed("n1") == [ "val_b0", "val_b1" ]), + assert(replica_committed("n2") == [ "val_b0", "val_b1" ]), + assert(replica_committed("n3") == [ "val_b0", "val_b1" ]), + assert(replica_committed("n4") == [ "val_b0", "val_b1" ]), + unchanged_all, + }) + .expect(all_invariants) + // the new leader proposes + .then(all { + proposer_step("n2", "val_b2"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // replicas process the propose message + .then(all_replicas_get_propose("val_b2", 2, "n2", + Commit({ + vote: { + view: 2, + block_number: 1, + block_hash: "val_b1", + }, + agg_sig: Set("n0", "n1", "n2", "n3", "n4") + }))) + .expect(all_invariants) + // n0 process the commit messages + .then(replica_commits("n0", "val_b2", 2, 3)) + .expect(all_invariants) + // n1 process the commit messages + .then(replica_commits("n1", "val_b2", 2, 3)) + .expect(all_invariants) + // n2 process the commit messages + .then(replica_commits("n2", "val_b2", 2, 3)) + .expect(all_invariants) + // n3 process the commit messages + .then(replica_commits("n3", "val_b2", 2, 3)) + .expect(all_invariants) + // n4 process the commit messages + .then(replica_commits("n4", "val_b2", 2, 3)) + .expect(all_invariants) + .then(all { + assert(replica_committed("n0") == [ "val_b0", "val_b1", "val_b2" ]), + assert(replica_committed("n1") == [ "val_b0", "val_b1", "val_b2" ]), + assert(replica_committed("n2") == [ "val_b0", "val_b1", "val_b2" ]), + assert(replica_committed("n3") == [ "val_b0", "val_b1", "val_b2" ]), + assert(replica_committed("n4") == [ "val_b0", "val_b1", "val_b2" ]), + unchanged_all, + }) + .expect(all_invariants) + } + + run view0_change_in_view0_Test = { + init_view_1_with_leader(Map(0 -> "n0", 1 -> "n0", 2 -> "n1", 3 -> "n2", 4 -> "n3")) + // the leader proposes + .then(all { + proposer_step("n0", "val_b0"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // all messages are lost + .then(all { + on_timer_is_finished("n1"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n2"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n3"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + on_timer_is_finished("n4"), + unchanged_leader_replica, + }) + .expect(all_invariants) + // all replicas send TimeoutVote to each other, each replica gets all messages from all other replicas including itself + .then(replica_receives_all_timeout_votes("n0", COMMIT_VOTE_NONE, 
HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n1", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n2", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n3", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(replica_receives_all_timeout_votes("n4", COMMIT_VOTE_NONE, HIGH_COMMIT_QC_NONE, 1)) + .expect(all_invariants) + .then(all { + proposer_step("n1", "val_b1"), + unchanged_leader_replica, + }) + .expect(all_invariants) + .then(all { + assert(replica_state.get("n0").high_timeout_qc.option_has(qc => qc.votes.keys().size()==5)), + assert(replica_state.get("n1").high_timeout_qc.option_has(qc => qc.votes.keys().size()==5)), + assert(replica_state.get("n2").high_timeout_qc.option_has(qc => qc.votes.keys().size()==5)), + assert(replica_state.get("n3").high_timeout_qc.option_has(qc => qc.votes.keys().size()==5)), + assert(replica_state.get("n4").high_timeout_qc.option_has(qc => qc.votes.keys().size()==5)), + + assert(replica_state.get("n0").high_timeout_qc.option_has(qc => qc.agg_sig.size()==5)), + assert(replica_state.get("n1").high_timeout_qc.option_has(qc => qc.agg_sig.size()==5)), + assert(replica_state.get("n2").high_timeout_qc.option_has(qc => qc.agg_sig.size()==5)), + assert(replica_state.get("n3").high_timeout_qc.option_has(qc => qc.agg_sig.size()==5)), + assert(replica_state.get("n4").high_timeout_qc.option_has(qc => qc.agg_sig.size()==5)), + unchanged_all, + }) + .expect(all_invariants) + } + + action all_replicas_get_propose(block: Block, block_number: BlockNumber, + leader: ReplicaId, justification: Justification): bool = { + val proposal: Proposal = { + block: Some(block), + sig: leader, + justification: justification, + ghost_block_number: block_number, + } + all { + on_proposal("n0", proposal), + unchanged_leader_replica, + }.then(all { + on_proposal("n1", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n2", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n3", proposal), + unchanged_leader_replica, + }) + .then(all { + on_proposal("n4", proposal), + unchanged_leader_replica, + }) + } + + action replica_receives_all_timeout_votes(id: ReplicaId, high_vote: Option[CommitVote], high_commit_qc: Option[CommitQC], view: ViewNumber):bool = all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n0"} ), + unchanged_leader_replica, + } + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n1"} ), + unchanged_leader_replica, + }) + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n2"} ), + unchanged_leader_replica, + }) + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n3"} ), + unchanged_leader_replica, + }) + .then(all { + on_timeout(id, {vote: { view: view, high_commit_qc: high_commit_qc, high_vote: high_vote}, sig: "n4"} ), + unchanged_leader_replica, + }) + + + action replica_commits(id: ReplicaId, block_hash: BlockHash, block_number: BlockNumber, view: ViewNumber):bool = all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n0"}), + unchanged_leader_replica, + }.then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view 
}, sig:"n1"}), + unchanged_leader_replica, + }).then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n2"}), + unchanged_leader_replica, + }).then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n3"}), + unchanged_leader_replica, + }).then(all { + on_commit(id, { vote: {block_hash: block_hash, block_number: block_number, view: view }, sig:"n4"}), + unchanged_leader_replica, + }) + + action replica_receives_new_view(id: ReplicaId, justification: Justification): bool = all { + on_new_view(id, { sig: id, justification: Timeout(init_timeout_corr_qc) }), + unchanged_leader_replica, + } + + action unchanged_leader_replica = all { + leader' = leader, + replica_view' = replica_view, + } + + action unchanged_all = all { + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = ghost_step, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + } +} \ No newline at end of file diff --git a/spec/protocol-spec/tests/tests_n6f1b2.qnt b/spec/protocol-spec/tests/tests_n6f1b2.qnt new file mode 100644 index 00000000..b80cfd38 --- /dev/null +++ b/spec/protocol-spec/tests/tests_n6f1b2.qnt @@ -0,0 +1,243 @@ +// tests for the configuration of: N = 6, F = 1, and 2 Byzantine faults +module tests_n6f1b2 { + import types.* from "../types" + import defs.* from "../defs" + import option.* from "../option" + + import replica( + CORRECT = Set("n0", "n1", "n2", "n3"), + FAULTY = Set("n4", "n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" + + // an initial quorum certificate + pure val init_timeout_qc: TimeoutQC = { + votes: REPLICAS.mapBy(id => { view: 0, high_vote: COMMIT_VOTE_NONE, high_commit_qc: HIGH_COMMIT_QC_NONE}), + agg_sig: REPLICAS, + ghost_view: 0, + } + + def replica_committed(id: ReplicaId): List[Block] = { + replica_state.get(id).committed_blocks.foldl([], (bs, cb) => bs.append(cb.block)) + } + + // This test demonstrates that two Byzantine replicas can convince three correct replicas + // to finalize two different blocks. + // 29 steps. 
+ run disagreement_Test = { + val high_vote_b0 = Some({ view: 1, block_number: 0, block_hash: "val_b0" }) + val high_vote_b1 = Some({ view: 1, block_number: 0, block_hash: "val_b1" }) + val timeout_vote_b0: TimeoutVote = { view: 1, high_commit_qc: None, high_vote: high_vote_b0 } + val timeout_vote_b1: TimeoutVote = { view: 1, high_commit_qc: None, high_vote: high_vote_b1 } + // n4 and n5 partition the correct replicas into n0, n1, n2 and n1, n2, n3 + val timeout_qc: TimeoutQC = { + ghost_view: 1, + agg_sig: Set("n1", "n2", "n3", "n4", "n5"), + votes: Set("n1", "n2", "n3", "n4", "n5").mapBy(id => { + if (Set("n1", "n2").contains(id)) { + timeout_vote_b0 + } else { + timeout_vote_b1 + } + }) + } + val new_view_timeout: NewView = { + sig: "n4", + justification: Timeout(timeout_qc), + } + val proposal_b0_in_1: Proposal = { + block: Some("val_b0"), + justification: Timeout(init_timeout_qc), + sig: "n4", + ghost_block_number: 0, + } + val proposal_b1_in_1: Proposal = { + block: Some("val_b1"), + justification: Timeout(init_timeout_qc), + sig: "n4", + ghost_block_number: 0, + } + val proposal_b1_in_2: Proposal = { + block: None, + justification: new_view_timeout.justification, + sig: "n5", + ghost_block_number: 0, + } + init_view_1_with_leader(Map(0 -> "n0", 1 -> "n4", 2 -> "n5", 3 -> "n2", 4 -> "n3", 5 -> "n5")) + // the faulty leader n4 of view 1 proposes val_b0 + .then(all { + msgs_proposal' = msgs_proposal.union(Set(proposal_b0_in_1)), + unchanged_all_but_proposal, + }) + .expect(all_invariants) + // n0, n1, n2 receive the proposal + .then(all { on_proposal("n0", proposal_b0_in_1), unchanged_leader_replica }) + .then(all { on_proposal("n1", proposal_b0_in_1), unchanged_leader_replica }) + .then(all { on_proposal("n2", proposal_b0_in_1), unchanged_leader_replica }) + .expect(all_invariants) + // the faulty replicas n4 and n5 send commit votes for val_b0 + .then(all { + val commit_vote_n4: SignedCommitVote = { + vote: { block_hash: "val_b0", block_number: 0, view: 1 }, + sig: "n4", + } + val commit_vote_n5: SignedCommitVote = { + vote: { block_hash: "val_b0", block_number: 0, view: 1 }, + sig: "n5", + } + msgs_signed_commit' = msgs_signed_commit.union(Set(commit_vote_n4, commit_vote_n5)), + unchanged_all_but_commit_vote, + }) + // the correct replica n0 finalizes the block val_b0 + .then(replica_on_commit("n0", "val_b0", 0, 1, "n0")) + .then(replica_on_commit("n0", "val_b0", 0, 1, "n1")) + .then(replica_on_commit("n0", "val_b0", 0, 1, "n2")) + .then(replica_on_commit("n0", "val_b0", 0, 1, "n4")) + .then(replica_on_commit("n0", "val_b0", 0, 1, "n5")) + .expect(all_invariants) + .then(all { + assert(replica_committed("n0") == [ "val_b0" ]), + unchanged_all, + }) + // the faulty leader n4 of view 1 proposes again, but this time it proposes val_b1 + .then(all { + msgs_proposal' = msgs_proposal.union(Set(proposal_b1_in_1)), + unchanged_all_but_proposal, + }) + .expect(all_invariants) + // n3 receives the second proposal and sets its high_vote to val_b1 + .then(all { on_proposal("n3", proposal_b1_in_1), unchanged_leader_replica }) + .expect(all_invariants) + // n1, n2, n3 time out + .then(all { on_timer_is_finished("n1"), unchanged_leader_replica }) + .then(all { on_timer_is_finished("n2"), unchanged_leader_replica }) + .then(all { on_timer_is_finished("n3"), unchanged_leader_replica }) + // n4 and n5 send SignedTimeoutVote and NewView + .then(all { + val timeout_vote_n4: SignedTimeoutVote = { + vote: timeout_vote_b1, + sig: "n4", + } + val timeout_vote_n5: SignedTimeoutVote = { + vote: 
timeout_vote_b1, + sig: "n5", + } + all { + msgs_signed_timeout' = msgs_signed_timeout.union(Set(timeout_vote_n4, timeout_vote_n5)), + msgs_new_view' = msgs_new_view.union(Set(new_view_timeout)), + unchanged_all_but_timeout_vote_and_new_view, + } + }) + // n1, n2, n3 receive NewView and start view 2 + .then(all { on_new_view("n1", new_view_timeout), unchanged_leader_replica }) + .then(all { on_new_view("n2", new_view_timeout), unchanged_leader_replica }) + .then(all { on_new_view("n3", new_view_timeout), unchanged_leader_replica }) + // n5 re-proposes val_b1 in view 2 + .then(all { + msgs_proposal' = msgs_proposal.union(Set(proposal_b1_in_2)), + unchanged_all_but_proposal, + }) + .expect(all_invariants) + // n1, n2, n3 receive the re-proposal for val_b1 and send their commit votes + .then(all { on_proposal("n1", proposal_b1_in_2), unchanged_leader_replica }) + .then(all { on_proposal("n2", proposal_b1_in_2), unchanged_leader_replica }) + .then(all { on_proposal("n3", proposal_b1_in_2), unchanged_leader_replica }) + .expect(all_invariants) + // n4 and n5 send SignedCommitVote for val_b1 in view 2 + .then(all { + val commit_vote_n4: SignedCommitVote = { + vote: { block_hash: "val_b1", block_number: 0, view: 2 }, + sig: "n4", + } + val commit_vote_n5: SignedCommitVote = { + vote: { block_hash: "val_b1", block_number: 0, view: 2 }, + sig: "n5", + } + msgs_signed_commit' = msgs_signed_commit.union(Set(commit_vote_n4, commit_vote_n5)), + unchanged_all_but_commit_vote, + }) + // the correct replica n3 received 4 on_commit votes + .then(replica_on_commit("n3", "val_b1", 0, 2, "n1")) + .then(replica_on_commit("n3", "val_b1", 0, 2, "n2")) + .then(replica_on_commit("n3", "val_b1", 0, 2, "n3")) + .then(replica_on_commit("n3", "val_b1", 0, 2, "n4")) + .expect(all_invariants) + // this last on_commit finalizes the block val_b1 in view 2 and breaks our invariants + .then(replica_on_commit("n3", "val_b1", 0, 2, "n5")) + .then(all { + assert(replica_committed("n0") == [ "val_b0" ]), + assert(replica_committed("n3") == [ "val_b1" ]), + // agreement is violated, as expected + assert(not(agreement_inv)), + unchanged_all, + }) + } + + action replica_on_commit(id: ReplicaId, block_hash: BlockHash, block_number: BlockNumber, view: ViewNumber, sig: ReplicaKey): bool = all { + on_commit(id, { + vote: { block_hash: block_hash, block_number: block_number, view: view }, + sig: sig + }), + unchanged_leader_replica, + } + + action unchanged_leader_replica = all { + leader' = leader, + replica_view' = replica_view, + } + + action unchanged_all = all { + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = ghost_step, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + } + + action unchanged_all_but_proposal = all { + msgs_signed_timeout' = msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_signed_commit' = msgs_signed_commit, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = ghost_step, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + } + + action unchanged_all_but_commit_vote = all { + msgs_signed_timeout' = 
msgs_signed_timeout, msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = ghost_step, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + } + + action unchanged_all_but_timeout_vote = all { + msgs_new_view' = msgs_new_view, + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = ghost_step, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + } + + action unchanged_all_but_timeout_vote_and_new_view = all { + msgs_proposal' = msgs_proposal, msgs_signed_commit' = msgs_signed_commit, + store_signed_timeout' = store_signed_timeout, store_signed_commit' = store_signed_commit, + proposer_view' = proposer_view, ghost_justifications' = ghost_justifications, + ghost_step' = ghost_step, + leader' = leader, + replica_view' = replica_view, replica_state' = replica_state, + } +} \ No newline at end of file diff --git a/spec/protocol-spec/tests/unit_tests.qnt b/spec/protocol-spec/tests/unit_tests.qnt new file mode 100644 index 00000000..d0a525f2 --- /dev/null +++ b/spec/protocol-spec/tests/unit_tests.qnt @@ -0,0 +1,306 @@ +module tests { + import types.* from "../types" + import defs.* from "../defs" + import option.* from "../option" + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4"), + FAULTY = Set("n5"), + WEIGHTS = Map("n0"->1, "n1"->1, "n2"->1, "n3"->1, "n4"->1, "n5"->1), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4"->"n4", "n5"->"n5"), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1", "val_b2"), + INVALID_BLOCKS = Set("inv_b3") + ).* from "../replica" + + + run weight_Test = all { + TOTAL_WEIGHT == 6, + QUORUM_WEIGHT == 5, + SUBQUORUM_WEIGHT == 3, + FAULTY_WEIGHT == 1 + } + + run get_implied_block_Test = { + pure val commit_vote: CommitVote = { + view: 0, + block_number: 0, + block_hash: hash("aa") + } + pure val commit: Justification = Commit({vote: commit_vote, agg_sig: Set("")}) + + pure val timeout: Justification = Timeout({ + votes: Map(), + agg_sig: Set(""), + ghost_view: 1 + }) + + all { + get_implied_block(commit) == (1, None), + get_implied_block(timeout) == (0, None), + } + } + + val vote1: CommitVote = { + view: 1, + block_number: 1, + block_hash: "val_b0", + } + + val vote2: CommitVote = { + view: 1, + block_number: 1, + block_hash: "val_b1", + } + + val vote3: CommitVote = { + view: 2, + block_number: 2, + block_hash: "val_b2", + } + + val commit_qc_1: CommitQC = { + vote: vote1, + agg_sig: Set("n0", "n1", "n2", "n3", "n5") + } + + val commit_qc_2: CommitQC = { + vote: vote2, + agg_sig: Set("n0", "n1", "n2", "n3", "n5") + } + + val commit_qc_3: CommitQC = { + vote: vote3, + agg_sig: Set("n0", "n1", "n2", "n3", "n5") + } + + val timeout_qc_1: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 0, + votes: + Map( + "n0" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n1" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n2" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, 
high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 } + ) + } + + val timeout_qc_2: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 0, + votes: + Map( + "n0" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 0 }, + "n1" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n2" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 0 } + ) + } + + val timeout_qc_3: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 2, + votes: + Map( + "n0" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n1" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n2" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 } + ) + } + + val timeout_qc_4: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 2, + votes: + Map( + "n0" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n1" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n2" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 } + ) + } + + val timeout_qc_5: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 2, + votes: + Map( + "n0" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n1" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote1), view: 2 }, + "n2" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote3), view: 2 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote3), view: 2 } + ) + } + + val timeout_qc_6: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 2, + votes: + Map( + "n0" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n1" -> { high_commit_qc: Some(commit_qc_2), high_vote: Some(vote1), view: 2 }, + "n2" -> { high_commit_qc: Some(commit_qc_3), high_vote: Some(vote2), view: 2 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote3), view: 2 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote3), view: 2 } + ) + } + + val timeout_qc_7: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 2, + votes: + Map( + "n0" -> { high_commit_qc: 
Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n1" -> { high_commit_qc: Some(commit_qc_2), high_vote: Some(vote1), view: 2 }, + "n2" -> { high_commit_qc: Some(commit_qc_3), high_vote: Some(vote1), view: 2 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 }, + "n5" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: Some(vote2), view: 2 } + ) + } + + val timeout_qc_8: TimeoutQC = { + agg_sig: Set("n0", "n1", "n2", "n3", "n4", "n5"), + ghost_view: 2, + votes: + Map( + "n0" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n1" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n2" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n3" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n4" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 }, + "n5" -> { high_commit_qc: Some(commit_qc_1), high_vote: Some(vote1), view: 2 } + ) + } + + run high_vote_Test = { + + all { + assert(high_vote(timeout_qc_1) == None), + assert(high_vote(timeout_qc_2) == None), + assert(high_vote(timeout_qc_3) == Some(vote1)), + assert(high_vote(timeout_qc_4) == Some(vote1)), + assert(high_vote(timeout_qc_5) == None), + assert(high_vote(timeout_qc_6) == None), + assert(high_vote(timeout_qc_7) == None), + } + + } + + run high_qc_Test = { + all { + assert(high_qc(timeout_qc_1) == HIGH_COMMIT_QC_NONE), + assert(high_qc(timeout_qc_5) == Some(commit_qc_1)), + assert(high_qc(timeout_qc_6) == Some(commit_qc_3)), + } + + } + + run max_timeout_qc_Test = { + all { + max_timeout_qc(HIGH_TIMEOUT_QC_NONE, HIGH_TIMEOUT_QC_NONE) == HIGH_TIMEOUT_QC_NONE, + max_timeout_qc(HIGH_TIMEOUT_QC_NONE, Some(timeout_qc_1)) == Some(timeout_qc_1), + max_timeout_qc(Some(timeout_qc_1), HIGH_TIMEOUT_QC_NONE) == Some(timeout_qc_1), + max_timeout_qc(Some(timeout_qc_7), Some(timeout_qc_1)) == Some(timeout_qc_7), + max_timeout_qc(Some(timeout_qc_1), Some(timeout_qc_7)) == Some(timeout_qc_7), + } + } + + run max_commit_qc_Test = { + all { + max_commit_qc(HIGH_COMMIT_QC_NONE, HIGH_COMMIT_QC_NONE) == HIGH_COMMIT_QC_NONE, + max_commit_qc(Some(commit_qc_1), HIGH_COMMIT_QC_NONE) == Some(commit_qc_1), + max_commit_qc(HIGH_COMMIT_QC_NONE, Some(commit_qc_1)) == Some(commit_qc_1), + max_commit_qc(Some(commit_qc_1), Some(commit_qc_3)) == Some(commit_qc_3), + max_commit_qc(Some(commit_qc_3), Some(commit_qc_1)) == Some(commit_qc_3), + } + } + + val signed_timeout_votes_1: Set[SignedTimeoutVote] = Set( + { + vote: { + view: 10, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }, + sig: "n1", + } + ) + val signed_timeout_votes_2 = Set( + { + vote: { + view: 10, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }, + sig: "n2", + }, + { + vote: { + view: 10, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }, + sig: "n3", + }, + { + vote: { + view: 10, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }, + sig: "n4", + }, + { + vote: { + view: 10, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }, + sig: "n1", + }, + { + vote: { + view: 10, + high_vote: COMMIT_VOTE_NONE, + high_commit_qc: HIGH_COMMIT_QC_NONE + }, + sig: "n0", + } + + ) + + run get_timeout_qc_Test = { + val qc = { + ghost_view: 10, + agg_sig: Set("n0", "n1", "n2", "n3", "n4"), + votes: + Map( + "n0" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, 
high_vote: COMMIT_VOTE_NONE, view: 10 }, + "n1" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 10 }, + "n2" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 10 }, + "n3" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 10 }, + "n4" -> { high_commit_qc: HIGH_COMMIT_QC_NONE, high_vote: COMMIT_VOTE_NONE, view: 10 } + ) + } + all { + get_timeout_qc(signed_timeout_votes_1, 10) == HIGH_TIMEOUT_QC_NONE, + get_timeout_qc(signed_timeout_votes_2, 10) == Some(qc), + get_timeout_qc(signed_timeout_votes_2, 100) == HIGH_TIMEOUT_QC_NONE + } + } +} \ No newline at end of file diff --git a/spec/protocol-spec/twins_n6f1b1.qnt b/spec/protocol-spec/twins_n6f1b1.qnt new file mode 100644 index 00000000..7346a948 --- /dev/null +++ b/spec/protocol-spec/twins_n6f1b1.qnt @@ -0,0 +1,34 @@ +// This is an explorative module applying the Twins approach +// to model byzantine behaviour with 1 faulty replica and 5 correct replicas. +module twins { + // A specification instance for n=6, f=1 (threshold), 5 correct replicas, and 1 faulty replica equivocating + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4", "n5_1", "n5_2"), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4" -> "n4", "n5_1"->"n5", "n5_2"->"n5"), + // The faulty set is empty to disable the randomized byzantine behaviour generator and use correct replica twins instead + FAULTY = Set(), + WEIGHTS = Map("n0" -> 1, "n1" -> 1, "n2" -> 1, "n3" -> 1, "n4" -> 1, "n5_1" -> 1, "n5_2" -> 0), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ) as i from "./replica" + + action init = all { + // all leaders are byzantine: + // non-deterministically choose a leader function that maps every view to the twins' shared key + nondet ldr = i::VIEWS.setOfMaps(Set("n5")).oneOf() + i::init_view_1_with_leader(ldr), + + + // prevent accidental misconfigurations of this module + assert(i::N == i::TOTAL_WEIGHT), + // 1 replica from the CORRECT set has a twin, which makes it byzantine + assert(i::N + 1 == i::CORRECT.size()), + assert(i::FAULTY.size()==0) + } + + + action step = i::correct_step +} \ No newline at end of file diff --git a/spec/protocol-spec/twins_n6f1b2.qnt b/spec/protocol-spec/twins_n6f1b2.qnt new file mode 100644 index 00000000..46321eec --- /dev/null +++ b/spec/protocol-spec/twins_n6f1b2.qnt @@ -0,0 +1,34 @@ +// This is an explorative module applying the Twins approach +// to model byzantine behaviour with 2 faulty replicas and 4 correct replicas.
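+// For example, in the instance below the ids "n4_1" and "n4_2" share the signing key "n4" (and likewise "n5_1" and "n5_2" share "n5"), so other replicas cannot tell the twins' messages apart: each twin pair can equivocate like a single byzantine replica.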
+module twins { + // A specification instance for n=6, f=1 (threshold), 4 correct replicas, and 2 faulty replicas equivocating + import replica( + CORRECT = Set("n0", "n1", "n2", "n3", "n4_1", "n4_2", "n5_1", "n5_2"), + REPLICA_KEYS = Map("n0"->"n0", "n1"->"n1", "n2"->"n2", "n3"->"n3", "n4_1" -> "n4", "n4_2" -> "n4", "n5_1"->"n5", "n5_2"->"n5"), + // The faulty set is empty to disable the randomized byzantine behaviour generator and use correct replica twins instead + FAULTY = Set(), + WEIGHTS = Map("n0" -> 1, "n1" -> 1, "n2" -> 1, "n3" -> 1, "n4_1" -> 1, "n4_2" -> 0, "n5_1" -> 1, "n5_2" -> 0), + N = 6, + F = 1, + VIEWS = 0.to(5), + VALID_BLOCKS = Set("val_b0", "val_b1"), + INVALID_BLOCKS = Set("inv_b3") + ) as i from "./replica" + + action init = all { + // all leaders are byzantine: + // non-deterministically choose a leader function that maps every view to one of the twins' shared keys + nondet ldr = i::VIEWS.setOfMaps(Set("n4", "n5")).oneOf() + i::init_view_1_with_leader(ldr), + + + // prevent accidental misconfigurations of this module + assert(i::N == i::TOTAL_WEIGHT), + // 2 replicas from the CORRECT set have twins that make them byzantine + assert(i::N + 2 == i::CORRECT.size()), + assert(i::FAULTY.size()==0) + } + + + action step = i::correct_step +} \ No newline at end of file diff --git a/spec/protocol-spec/types.qnt b/spec/protocol-spec/types.qnt new file mode 100644 index 00000000..aaeb37fe --- /dev/null +++ b/spec/protocol-spec/types.qnt @@ -0,0 +1,184 @@ +// -*- mode: Bluespec; -*- + +module types { + import option.* from "./option" + + // For specification purposes, a block is just an indivisible string. + // We can simply use names such as "v0", "v1", "v2". What matters here + // is that these names are unique. + type Block = str + + // For specification purposes, we represent a hash of a string `s` as + // the string `s`. This representation is collision-free, + // and we interpret it as opaque. + type BlockHash = str + + // Get the "hash" of a string + pure def hash(b: str): BlockHash = b + + // View number + type ViewNumber = int + + // Block number + type BlockNumber = int + + // Weight + type Weight = int + + // A replica id is a unique replica identifier, e.g., its hardware address. + // For specification purposes, these are just indivisible strings + // such as "n0", "n1", "n2", "n3". What matters to us is that these names are unique. + type ReplicaId = str + + // Each replica id has a private/public key pair assigned to it. + // For testing purposes and for modeling byzantine behavior, + // several replica ids may share the same private/public key pair. + type ReplicaKey = str + + // Similar to ReplicaId, we model a signature as the replica name. + type Signature = str + + // For specification purposes, an aggregate signature is simply a set + // of signatures. + type AggregateSignature = Set[Signature] + + /// A block with a matching valid certificate. + /// invariant: hash(block) == commit_qc.vote.block_hash + type CommittedBlock = { + block: Block, + commit_qc: CommitQC, + } + + type ReplicaState = { + // The current view number. + view: ViewNumber, + // The phase of the replica. Determines if we already voted for a proposal in this view. + // Phase::Prepare | Phase::Commit | Phase::Timeout + phase: Phase, + // The commit vote with the highest view that this replica has signed, if any. + high_vote: Option[CommitVote], + // The commit quorum certificate with the highest view that this replica + // has observed, if any.
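+ // (This field, together with high_vote above, mirrors the fields of TimeoutVote, defined below.)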
+ high_commit_qc: Option[CommitQC], + // The timeout quorum certificate with the highest view that this replica + // has observed, if any. + high_timeout_qc: Option[TimeoutQC], + // A cache of not-yet-committed proposals. + // Collects all received proposals sent by the proposers. + // In a real implementation, this cache would be actively pruned. + // In this Quint specification, blocks and their hashes are identical, + // so we store the set Set[(BlockNumber, BlockHash)] instead of a map + // from (BlockNumber, BlockHash) to Block. + cached_proposals: Set[(BlockNumber, BlockHash)], + // A history of committed blocks. + // invariant: committed_blocks[i].commit_qc.vote.block_number == i + committed_blocks: List[CommittedBlock], + } + + type Phase = PhasePrepare | PhaseCommit | PhaseTimeout + + type Proposal = { + // Block that the leader is proposing, if this is a new proposal. + block: Option[Block], + // What attests to the validity of this proposal. + justification: Justification, + // Signature of the sender. + sig: Signature, + // The block number of the proposed block if `block` is `Some`; otherwise 0. + // We assume that the implementation can extract the block number from the block. + ghost_block_number: BlockNumber, + } + + type Justification = + // This proposal is being proposed after a view where we finalized a block. + // A commit QC is just a collection of commit votes (with at least + // QUORUM_WEIGHT) for the previous view. Note that the commit votes MUST + // be identical. + Commit(CommitQC) + // This proposal is being proposed after a view where we timed out. + // A timeout QC is just a collection of timeout votes (with at least + // QUORUM_WEIGHT) for the previous view. Unlike with the Commit QC, + // timeout votes don't need to be identical. + | Timeout(TimeoutQC) + + + type CommitVote = { + // The current view. + view: ViewNumber, + // The number of the block that the replica is committing to. + block_number: BlockNumber, + // The hash of the block that the replica is committing to. + block_hash: BlockHash, + } + + type SignedCommitVote = { + // The commit vote. + vote: CommitVote, + // Signature of the sender. + sig: Signature, + } + + pure val COMMIT_VOTE_NONE: Option[CommitVote] = None + + // If we have 4f+1 identical commit votes from different replicas, we can + // create a commit quorum certificate locally. + type CommitQC = { + // The commit vote. + vote: CommitVote, + // The aggregate signature of the replicas. We ignore the details here. + // Can be something as simple as a vector of signatures. + agg_sig: AggregateSignature, + } + + pure val HIGH_COMMIT_QC_NONE: Option[CommitQC] = None + + // Timeout votes are sent from replicas to replicas. + type TimeoutVote = { + // The current view. + view: ViewNumber, + // The commit vote with the highest view that this replica has signed. + high_vote: Option[CommitVote], + // The commit quorum certificate with the highest view that this replica + // has observed. + high_commit_qc: Option[CommitQC], + } + + type SignedTimeoutVote = { + // The timeout vote. + vote: TimeoutVote, + // Signature of the sender. + sig: Signature, + } + + // If we have timeout votes with at least QUORUM_WEIGHT for the same + // view from different replicas, we can create a timeout quorum certificate + // locally. Unlike the commit QC, this QC aggregates different messages. + type TimeoutQC = { + // A collection of the replica timeout votes, indexed by the corresponding + // validator public key.
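+ // Note that the map is keyed by ReplicaKey rather than ReplicaId, so twins that share a key (as in the twins modules) contribute at most one entry.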
+ // We have to make sure that a vote from each replica appears here at most once. + votes: ReplicaKey -> TimeoutVote, + // The aggregate signature of the replicas. We ignore the details here. + // Can be something as simple as a vector of signatures. + agg_sig: AggregateSignature, + + // The current view, computed from the votes (not present in the original specification). + ghost_view: ViewNumber, + } + + pure val HIGH_TIMEOUT_QC_NONE: Option[TimeoutQC] = None + + // New view messages are sent from replicas to replicas. + // These are used to synchronize replicas. + type NewView = { + // What attests to the validity of this view change. + justification: Justification, + // Signature of the sender. + sig: Signature, + } + + type StepKind = + InitStep | ProposerStep(ReplicaId) | OnProposalStep(ReplicaId) | OnCommitStep(ReplicaId) + | OnTimeoutStep(ReplicaId) | OnTimerIsFinishedStep(ReplicaId) + | OnNewViewStep({ id: ReplicaId, view: ViewNumber }) | FaultyStep | FetchStep(ReplicaId) | AnyStep +} \ No newline at end of file