::Pair, _, _, _, _, _, _, _, _>(params);
+ // We have a separate function only to be able to use `docify::export` on this piece of
+ // code.
+ Self::launch_slot_based_collator(params, task_manager);
- task_manager.spawn_essential_handle().spawn(
- "collation-task",
- Some("parachain-block-authoring"),
- collation_future,
- );
- task_manager.spawn_essential_handle().spawn(
- "block-builder-task",
- Some("parachain-block-authoring"),
- block_builder_future,
- );
Ok(())
}
}
diff --git a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
new file mode 100644
index 000000000000..bc4f36c271fe
--- /dev/null
+++ b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
@@ -0,0 +1,142 @@
+//! # Enable elastic scaling MVP for a parachain
+//!
+//! This guide assumes full familiarity with Asynchronous Backing and its
+//! terminology, as defined in <https://wiki.polkadot.network/docs/maintain-guides-async-backing>.
+//! Furthermore, the parachain should have already been upgraded according to the guide.
+//!
+//! ## Quick introduction to elastic scaling
+//!
+//! [Elastic scaling](https://polkadot.network/blog/elastic-scaling-streamling-growth-on-polkadot)
+//! is a feature that will enable parachains to seamlessly scale up/down the number of used cores.
+//! This can be desirable in order to increase the compute or storage throughput of a parachain or
+//! to lower the latency between a transaction being submitted and it getting built in a parachain
+//! block.
+//!
+//! At present, with Asynchronous Backing enabled, a parachain can only include a block on the relay
+//! chain every 6 seconds, regardless of how many cores the parachain acquires. Elastic scaling
+//! builds further on the 10x throughput increase of Async Backing, enabling collators to submit up
+//! to 3 parachain blocks per relay chain block, resulting in a further 3x throughput increase.
+//!
+//! ## Current limitations of the MVP
+//!
+//! The full implementation of elastic scaling spans across the entire relay/parachain stack and is
+//! still [work in progress](https://github.com/paritytech/polkadot-sdk/issues/1829).
+//! The MVP is still considered experimental software, so stability is not guaranteed.
+//! If you encounter any problems,
+//! [please open an issue](https://github.com/paritytech/polkadot-sdk/issues).
+//! Below are described the current limitations of the MVP:
+//!
+//! 1. **Limited core count**. Parachain block authoring is sequential, so the second block will
+//! start being built only after the previous block is imported. The current block production is
+//! capped at 2 seconds of execution. Therefore, assuming the full 2 seconds are used, a
+//! parachain can only utilise at most 3 cores in a relay chain slot of 6 seconds. If the full
+//! execution time is not being used, higher core counts can be achieved.
+//! 2. **Single collator requirement for consistently scaling beyond a core at full authorship
+//! duration of 2 seconds per block.** Using the current implementation with multiple collators
+//! adds additional latency to the block production pipeline. Assuming block execution takes
+//! about the same as authorship, the additional overhead is equal to the duration of the authorship
+//! plus the block announcement. Each collator must first import the previous block before
+//! authoring a new one, so it is clear that the highest throughput can be achieved using a
+//! single collator. Experiments show that the peak performance using more than one collator
+//! (measured up to 10 collators) is utilising 2 cores with authorship time of 1.3 seconds per
+//! block, which leaves 400ms for networking overhead. This would allow for 2.6 seconds of
+//! execution, compared to the 2 seconds async backing enabled.
+//! [More experiments](https://github.com/paritytech/polkadot-sdk/issues/4696) are being
+//! conducted in this space.
+//! 3. **Trusted collator set.** The collator set needs to be trusted until there’s a mitigation
+//! that would prevent or deter multiple collators from submitting the same collation to multiple
+//! backing groups. A solution is being discussed
+//! [here](https://github.com/polkadot-fellows/RFCs/issues/92).
+//! 4. **Fixed scaling.** For true elasticity, the parachain must be able to seamlessly acquire or
+//! sell coretime as the user demand grows and shrinks over time, in an automated manner. This is
+//! currently lacking - a parachain can only scale up or down by “manually” acquiring coretime.
+//! This is not in the scope of the relay chain functionality. Parachains can already start
+//! implementing such autoscaling, but we aim to provide a framework/examples for developing
+//! autoscaling strategies.
+//!
+//! Another hard limitation that is not envisioned to ever be lifted is that parachains which create
+//! forks will generally not be able to utilise the full number of cores they acquire.
+//!
+//! ## Using elastic scaling MVP
+//!
+//! ### Prerequisites
+//!
+//! - Ensure Asynchronous Backing is enabled on the network and you have enabled it on the parachain
+//! using [`crate::guides::async_backing_guide`].
+//! - Ensure the `AsyncBackingParams.max_candidate_depth` value is configured to a value that is at
+//! least double the maximum targeted parachain velocity. For example, if the parachain will build
+//! at most 3 candidates per relay chain block, the `max_candidate_depth` should be at least 6.
+//! - Use a trusted single collator for maximum throughput.
+//! - Ensure enough coretime is assigned to the parachain. For maximum throughput the upper bound is
+//! 3 cores.
+//!
+//! Phase 1 is not needed if using the `polkadot-parachain` binary built
+//! from the latest polkadot-sdk release! Simply pass the `--experimental-use-slot-based` parameter
+//! to the command line and jump to Phase 2.
+//!
+//! The following steps assume using the cumulus parachain template.
+//!
+//! ### Phase 1 - (For custom parachain node) Update Parachain Node
+//!
+//! This assumes you are using
+//! [the latest parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain).
+//!
+//! This phase consists of plugging in the new slot-based collator.
+//!
+//! 1. In `node/src/service.rs` import the slot based collator instead of the lookahead collator.
+#![doc = docify::embed!("../../cumulus/polkadot-parachain/src/service.rs", slot_based_colator_import)]
+//!
+//! 2. In `start_consensus()`
+//! - Remove the `overseer_handle` param (also remove the
+//! `OverseerHandle` type import if it’s not used elsewhere).
+//! - Rename `AuraParams` to `SlotBasedParams`, remove the `overseer_handle` field and add a
+//! `slot_drift` field with a value of `Duration::from_secs(1)`.
+//! - Replace the single future returned by `aura::run` with the two futures returned by it and
+//! spawn them as separate tasks:
+#![doc = docify::embed!("../../cumulus/polkadot-parachain/src/service.rs", launch_slot_based_collator)]
+//!
+//! 3. In `start_parachain_node()` remove the `overseer_handle` param passed to `start_consensus`.
+//!
+//! ### Phase 2 - Activate fixed factor scaling in the runtime
+//!
+//! This phase consists of a couple of changes needed to be made to the parachain’s runtime in order
+//! to utilise fixed factor scaling.
+//!
+//! First of all, you need to decide the upper limit to how many parachain blocks you need to
+//! produce per relay chain block (in direct correlation with the number of acquired cores). This
+//! should be either 1 (no scaling), 2 or 3. This is called the parachain velocity.
+//!
+//! If you configure a velocity which is different from the number of assigned cores, the measured
+//! velocity in practice will be the minimum of these two.
+//!
+//! The chosen velocity will also be used to compute:
+//! - The slot duration, by dividing the 6000 ms duration of the relay chain slot duration by the
+//! velocity.
+//! - The unincluded segment capacity, by multiplying the velocity with 2 and adding 1 to
+//! it.
+//!
+//! Let’s assume a desired maximum velocity of 3 parachain blocks per relay chain block. The needed
+//! changes would all be done in `runtime/src/lib.rs`:
+//!
+//! 1. Rename `BLOCK_PROCESSING_VELOCITY` to `MAX_BLOCK_PROCESSING_VELOCITY` and increase it to the
+//! desired value. In this example, 3.
+//!
+//! ```ignore
+//! const MAX_BLOCK_PROCESSING_VELOCITY: u32 = 3;
+//! ```
+//!
+//! 2. Set the `MILLISECS_PER_BLOCK` to the desired value.
+//!
+//! ```ignore
+//! const MILLISECS_PER_BLOCK: u32 =
+//! RELAY_CHAIN_SLOT_DURATION_MILLIS / MAX_BLOCK_PROCESSING_VELOCITY;
+//! ```
+//! Note: for a parachain which measures time in terms of its own block number, changing block
+//! time may cause complications, requiring additional changes. See here more information:
+//! [`crate::guides::async_backing_guide#timing-by-block-number`].
+//!
+//! 3. Increase the `UNINCLUDED_SEGMENT_CAPACITY` to the desired value.
+//!
+//! ```ignore
+//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = 2 * MAX_BLOCK_PROCESSING_VELOCITY + 1;
+//! ```
diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs
index 8296ed447e14..9384f4c82ab3 100644
--- a/docs/sdk/src/guides/mod.rs
+++ b/docs/sdk/src/guides/mod.rs
@@ -41,3 +41,6 @@ pub mod async_backing_guide;
/// How to enable metadata hash verification in the runtime.
pub mod enable_metadata_hash;
+
+/// How to enable elastic scaling MVP on a parachain.
+pub mod enable_elastic_scaling_mvp;
diff --git a/master.wasm b/master.wasm
deleted file mode 100644
index 7ebb14371243..000000000000
Binary files a/master.wasm and /dev/null differ
diff --git a/modified.wasm b/modified.wasm
deleted file mode 100644
index 7ebb14371243..000000000000
Binary files a/modified.wasm and /dev/null differ
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index d62a1aef2309..9ad486657512 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -523,20 +523,17 @@ async fn prepare_can_run_serially() {
#[cfg(all(feature = "ci-only-tests", target_os = "linux"))]
#[tokio::test]
async fn all_security_features_work() {
- // Landlock is only available starting Linux 5.13, and we may be testing on an old kernel.
let can_enable_landlock = {
- let sysinfo = sc_sysinfo::gather_sysinfo();
- // The version will look something like "5.15.0-87-generic".
- let version = sysinfo.linux_kernel.unwrap();
- let version_split: Vec<&str> = version.split(".").collect();
- let major: u32 = version_split[0].parse().unwrap();
- let minor: u32 = version_split[1].parse().unwrap();
- if major >= 6 {
- true
- } else if major == 5 {
- minor >= 13
+ let res = unsafe { libc::syscall(libc::SYS_landlock_create_ruleset, 0usize, 0usize, 1u32) };
+ if res == -1 {
+ let err = std::io::Error::last_os_error().raw_os_error().unwrap();
+ if err == libc::ENOSYS {
+ false
+ } else {
+ panic!("Unexpected errno from landlock check: {err}");
+ }
} else {
- false
+ true
}
};
diff --git a/prdoc/pr_4663.prdoc b/prdoc/pr_4663.prdoc
new file mode 100644
index 000000000000..74b1274828d5
--- /dev/null
+++ b/prdoc/pr_4663.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Add elastic scaling MVP guide
+
+doc:
+ - audience: Node Operator
+ description: |
+ Adds a guide for parachains that want to use the experimental elastic scaling MVP.
+ Will be viewable at: https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/enable_elastic_scaling_mvp/index.html
+
+crates:
+ - name: polkadot-parachain-bin
+ bump: none
diff --git a/prdoc/pr_4997.prdoc b/prdoc/pr_4997.prdoc
new file mode 100644
index 000000000000..25620a7e63ea
--- /dev/null
+++ b/prdoc/pr_4997.prdoc
@@ -0,0 +1,20 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Do not crash on block gap in displaced_leaves_after_finalizing
+
+doc:
+ - audience:
+ - Node Operator
+ - Node Dev
+ description: |
+ After recent changes, crashes were occurring when calculating displaced branches after a block was finalized.
+ The reason is block gaps in the finalized chain. When encountering unknown blocks, the node was panicking.
+ This PR introduces changes to tolerate unknown blocks. Leaves that are separated by a gap from the to-be-finalized
+ block are not marked as displaced.
+
+crates:
+- name: sc-client-db
+ bump: none
+- name: sp-blockchain
+ bump: patch
diff --git a/prdoc/pr_5040.prdoc b/prdoc/pr_5040.prdoc
new file mode 100644
index 000000000000..62b175c1d648
--- /dev/null
+++ b/prdoc/pr_5040.prdoc
@@ -0,0 +1,11 @@
+title: Update libp2p-websocket to v0.42.2
+
+doc:
+ - audience: Node Operator
+ description: |
+ Fixes a panic coming from the libp2p-websocket which stops the node.
+ This fix ensures that polling multiple time after error results in an error instead of panics.
+
+crates:
+- name: sc-network
+ bump: minor
diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index e95cd9e4ad5f..acd165d91613 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -2613,6 +2613,35 @@ pub(crate) mod tests {
Ok(header.hash())
}
+ pub fn insert_disconnected_header(
+ backend: &Backend,
+ number: u64,
+ parent_hash: H256,
+ extrinsics_root: H256,
+ best: bool,
+ ) -> H256 {
+ use sp_runtime::testing::Digest;
+
+ let digest = Digest::default();
+ let header =
+ Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
+
+ let mut op = backend.begin_operation().unwrap();
+
+ op.set_block_data(
+ header.clone(),
+ Some(vec![]),
+ None,
+ None,
+ if best { NewBlockState::Best } else { NewBlockState::Normal },
+ )
+ .unwrap();
+
+ backend.commit_operation(op).unwrap();
+
+ header.hash()
+ }
+
pub fn insert_header_no_head(
backend: &Backend,
number: u64,
@@ -3112,6 +3141,123 @@ pub(crate) mod tests {
}
}
+ #[test]
+ fn displaced_leaves_after_finalizing_works_with_disconnect() {
+ // In this test we will create a situation that can typically happen after warp sync.
+ // The situation looks like this:
+ // g -> -> a3 -> a4
+ // Basically there is a gap of unimported blocks at some point in the chain.
+ let backend = Backend::<Block>::new_test(1000, 100);
+ let blockchain = backend.blockchain();
+ let genesis_number = 0;
+ let genesis_hash =
+ insert_header(&backend, genesis_number, Default::default(), None, Default::default());
+
+ let a3_number = 3;
+ let a3_hash = insert_disconnected_header(
+ &backend,
+ a3_number,
+ H256::from([200; 32]),
+ H256::from([1; 32]),
+ true,
+ );
+
+ let a4_number = 4;
+ let a4_hash =
+ insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
+ {
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+ assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
+ assert_eq!(displaced.displaced_leaves, vec![]);
+ assert_eq!(displaced.displaced_blocks, vec![]);
+ }
+
+ {
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap();
+ assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
+ assert_eq!(displaced.displaced_leaves, vec![]);
+ assert_eq!(displaced.displaced_blocks, vec![]);
+ }
+
+ // Import block a1 which has the genesis block as parent.
+ // g -> a1 -> -> a3(f) -> a4
+ let a1_number = 1;
+ let a1_hash = insert_disconnected_header(
+ &backend,
+ a1_number,
+ genesis_hash,
+ H256::from([123; 32]),
+ false,
+ );
+ {
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+ assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
+ assert_eq!(displaced.displaced_leaves, vec![]);
+ assert_eq!(displaced.displaced_blocks, vec![]);
+ }
+
+ // Import block b1 which has the genesis block as parent.
+ // g -> a1 -> -> a3(f) -> a4
+ // \-> b1
+ let b1_number = 1;
+ let b1_hash = insert_disconnected_header(
+ &backend,
+ b1_number,
+ genesis_hash,
+ H256::from([124; 32]),
+ false,
+ );
+ {
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+ assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
+ assert_eq!(displaced.displaced_leaves, vec![]);
+ assert_eq!(displaced.displaced_blocks, vec![]);
+ }
+
+ // If branch of b blocks is higher in number than a branch, we
+ // should still not prune disconnected leaves.
+ // g -> a1 -> -> a3(f) -> a4
+ // \-> b1 -> b2 ----------> b3 ----> b4 -> b5
+ let b2_number = 2;
+ let b2_hash =
+ insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
+ let b3_number = 3;
+ let b3_hash =
+ insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
+ let b4_number = 4;
+ let b4_hash =
+ insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
+ let b5_number = 5;
+ let b5_hash =
+ insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
+ {
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+ assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
+ assert_eq!(displaced.displaced_leaves, vec![]);
+ assert_eq!(displaced.displaced_blocks, vec![]);
+ }
+
+ // Even though there is a disconnect, displaced leaves should still be
+ // detected for branches above the block gap.
+ // /-> c4
+ // g -> a1 -> -> a3 -> a4(f)
+ // \-> b1 -> b2 ----------> b3 -> b4 -> b5
+ let c4_number = 4;
+ let c4_hash =
+ insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
+ {
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap();
+ assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
+ assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
+ assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
+ }
+ }
#[test]
fn displaced_leaves_after_finalizing_works() {
let backend = Backend::<Block>::new_test(1000, 100);
@@ -3156,6 +3302,15 @@ pub(crate) mod tests {
assert_eq!(displaced_a3.displaced_leaves, vec![]);
assert_eq!(displaced_a3.displaced_blocks, vec![]);
}
+ {
+ // Finalized block is above leaves and not imported yet.
+ // We will not be able to make a connection,
+ // nothing can be marked as displaced.
+ let displaced =
+ blockchain.displaced_leaves_after_finalizing(H256::from([57; 32]), 10).unwrap();
+ assert_eq!(displaced.displaced_leaves, vec![]);
+ assert_eq!(displaced.displaced_blocks, vec![]);
+ }
// fork from genesis: 2 prong.
let b1_number = 1;
diff --git a/substrate/kitchensink_runtime.wasm b/substrate/kitchensink_runtime.wasm
deleted file mode 100644
index 7ebb14371243..000000000000
Binary files a/substrate/kitchensink_runtime.wasm and /dev/null differ
diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml
index aedd720612c3..bd0daaf63c05 100644
--- a/substrate/primitives/blockchain/Cargo.toml
+++ b/substrate/primitives/blockchain/Cargo.toml
@@ -19,7 +19,6 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { features = ["derive"], workspace = true }
futures = { workspace = true }
-log = { workspace = true, default-features = true }
parking_lot = { workspace = true, default-features = true }
schnellru = { workspace = true }
thiserror = { workspace = true }
@@ -29,3 +28,4 @@ sp-consensus = { workspace = true, default-features = true }
sp-database = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-state-machine = { workspace = true, default-features = true }
+tracing = { workspace = true, default-features = true }
diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs
index a928217d5885..2accd4dad12c 100644
--- a/substrate/primitives/blockchain/src/backend.rs
+++ b/substrate/primitives/blockchain/src/backend.rs
@@ -17,7 +17,6 @@
//! Substrate blockchain trait
-use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
@@ -25,6 +24,7 @@ use sp_runtime::{
Justifications,
};
use std::collections::{btree_set::BTreeSet, HashMap, VecDeque};
+use tracing::{debug, warn};
use crate::{
error::{Error, Result},
@@ -228,6 +228,7 @@ pub trait Backend:
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
+ target: crate::LOG_TARGET,
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
@@ -254,16 +255,35 @@ pub trait Backend:
) -> std::result::Result<DisplacedLeavesAfterFinalization<Block>, Error> {
let leaves = self.leaves()?;
+ debug!(
+ target: crate::LOG_TARGET,
+ ?leaves,
+ %finalized_block_hash,
+ ?finalized_block_number,
+ "Checking for displaced leaves after finalization."
+ );
+
// If we have only one leaf there are no forks, and we can return early.
if finalized_block_number == Zero::zero() || leaves.len() == 1 {
return Ok(DisplacedLeavesAfterFinalization::default())
}
- // Store hashes of finalized blocks for quick checking later, the last block if the
+ // Store hashes of finalized blocks for quick checking later, the last block is the
// finalized one
let mut finalized_chain = VecDeque::new();
- finalized_chain
- .push_front(MinimalBlockMetadata::from(&self.header_metadata(finalized_block_hash)?));
+ let current_finalized = match self.header_metadata(finalized_block_hash) {
+ Ok(metadata) => metadata,
+ Err(Error::UnknownBlock(_)) => {
+ debug!(
+ target: crate::LOG_TARGET,
+ hash = ?finalized_block_hash,
+ "Tried to fetch unknown block, block ancestry has gaps."
+ );
+ return Ok(DisplacedLeavesAfterFinalization::default());
+ },
+ Err(e) => Err(e)?,
+ };
+ finalized_chain.push_front(MinimalBlockMetadata::from(&current_finalized));
// Local cache is a performance optimization in case of finalized block deep below the
// tip of the chain with a lot of leaves above finalized block
@@ -273,6 +293,7 @@ pub trait Backend:
displaced_leaves: Vec::with_capacity(leaves.len()),
displaced_blocks: Vec::with_capacity(leaves.len()),
};
+
let mut displaced_blocks_candidates = Vec::new();
for leaf_hash in leaves {
@@ -306,11 +327,11 @@ pub trait Backend:
continue;
}
- // Otherwise the whole leaf branch needs to be pruned, track it all the way to the
- // point of branching from the finalized chain
- result.displaced_leaves.push((leaf_number, leaf_hash));
- result.displaced_blocks.extend(displaced_blocks_candidates.drain(..));
- result.displaced_blocks.push(current_header_metadata.hash);
+ // We reuse `displaced_blocks_candidates` to store the current metadata.
+ // This block is not displaced if there is a gap in the ancestry. We
+ // check for this gap later.
+ displaced_blocks_candidates.push(current_header_metadata.hash);
+
// Collect the rest of the displaced blocks of leaf branch
for distance_from_finalized in 1_u32.. {
// Find block at `distance_from_finalized` from finalized block
@@ -318,9 +339,22 @@ pub trait Backend:
match finalized_chain.iter().rev().nth(distance_from_finalized as usize) {
Some(header) => (header.number, header.hash),
None => {
- let metadata = MinimalBlockMetadata::from(&self.header_metadata(
- finalized_chain.front().expect("Not empty; qed").parent,
- )?);
+ let to_fetch = finalized_chain.front().expect("Not empty; qed");
+ let metadata = match self.header_metadata(to_fetch.parent) {
+ Ok(metadata) => metadata,
+ Err(Error::UnknownBlock(_)) => {
+ debug!(
+ target: crate::LOG_TARGET,
+ distance_from_finalized,
+ hash = ?to_fetch.parent,
+ number = ?to_fetch.number,
+ "Tried to fetch unknown block, block ancestry has gaps."
+ );
+ break;
+ },
+ Err(e) => Err(e)?,
+ };
+ let metadata = MinimalBlockMetadata::from(&metadata);
let result = (metadata.number, metadata.hash);
finalized_chain.push_front(metadata);
result
@@ -336,11 +370,13 @@ pub trait Backend:
let parent_hash = current_header_metadata.parent;
if finalized_chain_block_hash == parent_hash {
// Reached finalized chain, nothing left to do
+ result.displaced_blocks.extend(displaced_blocks_candidates.drain(..));
+ result.displaced_leaves.push((leaf_number, leaf_hash));
break;
}
// Store displaced block and look deeper for block on finalized chain
- result.displaced_blocks.push(parent_hash);
+ displaced_blocks_candidates.push(parent_hash);
current_header_metadata =
MinimalBlockMetadata::from(&self.header_metadata(parent_hash)?);
}
diff --git a/substrate/primitives/blockchain/src/lib.rs b/substrate/primitives/blockchain/src/lib.rs
index eabbbcf50d9f..305b7f6afec1 100644
--- a/substrate/primitives/blockchain/src/lib.rs
+++ b/substrate/primitives/blockchain/src/lib.rs
@@ -24,3 +24,5 @@ mod header_metadata;
pub use backend::*;
pub use error::*;
pub use header_metadata::*;
+
+const LOG_TARGET: &str = "db::blockchain";