From 5bea149886c7ee9bf21745ec235b1e6363691b58 Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Tue, 7 Dec 2021 20:42:41 -0500 Subject: [PATCH] Support pre-runtime digest and instant seal (#15) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * checkpoint. Gonna try switching to preruntime digest first * pallet allows preruntime digests and validates at end of block * remove incorrect comment * factor slot prediction into helper function, start sketching manual seal consensus data provider * better message for future self * preruntime digest interface * `create_digest` compiles * service compiles * whoe service checks * allow running from cli * actually install our new manual seal consensus data provider * Handle no-key scenario * Preruntime digest an no memoizing author account * remove wrong comment. Better to follow aura * prune commented inherent-related code * Hack in fake signature in manual seal. * unused import * verifier supports both digest types * duplicate code from verifier to executor wrapper See https://github.com/PureStake/nimbus/issues/14 about de-duplicating this code * author inherent no longer required * sign blocks in manual seal !! * Hack import queue so it works in instant seal although it is now broken in parachain mode. * prune some debugging logs * NimbusBlockImport with optional parachain rules * we can use std on the client-side * remove unnecessary generic * indentation and comment * use constant instead of hard-coded nmbs * code style: map instead of if let * assume relay chain slot beacon in manual seal for now * Actually checkvalidity and send notification when using preruntime digest * Typos from code review Co-authored-by: Amar Singh Co-authored-by: girazoki * Proper index Co-authored-by: Amar Singh * Switch consensus worker from inherent to preruntime digest. Was it really that easy!? 
* map from author id to account id in FindAuthor * more toward parachain consensus worker * Update nimbus-consensus/src/import_queue.rs * inherent is a no-op * Revert "more toward parachain consensus worker" This reverts commit 807e72be3398fec3c4418d4cb0f8a43b0791395a. * validators don't need backward compatability * Fix incorrect merge conflict resolution. Still detect NimbusApi * Add copyright to manual seal file. Hell Yeah CI! * Typo found by @girazoki * stray space * Single author in dev spec so that `run-instant-seal --dev` works with one node * warnings * Apply suggestions from code review Co-authored-by: girazoki Co-authored-by: Éloïs * better weight in `on_initialize` * thoughts about the legacy author inherent * sketch kick-off inherent in pallet * fees * Update the inherent extrinsics Co-authored-by: Amar Singh Co-authored-by: girazoki Co-authored-by: Éloïs (cherry picked from commit d610b6829cc174b467b57d0b63246edb3d3dec16) # Conflicts: # Cargo.lock # nimbus-consensus/Cargo.toml # nimbus-consensus/src/lib.rs # pallets/author-inherent/src/lib.rs # parachain-template/node/Cargo.toml --- nimbus-consensus/Cargo.toml | 2 +- nimbus-consensus/src/import_queue.rs | 87 ++++++-- nimbus-consensus/src/lib.rs | 236 ++++++++++++++-------- nimbus-consensus/src/manual_seal.rs | 120 +++++++++++ nimbus-primitives/src/digests.rs | 21 +- nimbus-primitives/src/lib.rs | 2 + pallets/author-inherent/src/exec.rs | 23 +-- pallets/author-inherent/src/lib.rs | 147 +++++++------- parachain-template/node/Cargo.toml | 1 + parachain-template/node/src/chain_spec.rs | 4 - parachain-template/node/src/cli.rs | 3 + parachain-template/node/src/command.rs | 10 +- parachain-template/node/src/service.rs | 131 +++++++++++- 13 files changed, 579 insertions(+), 208 deletions(-) create mode 100644 nimbus-consensus/src/manual_seal.rs diff --git a/nimbus-consensus/Cargo.toml b/nimbus-consensus/Cargo.toml index c778c5ff..3aa69b7e 100644 --- a/nimbus-consensus/Cargo.toml +++ 
b/nimbus-consensus/Cargo.toml @@ -15,10 +15,10 @@ sp-block-builder = { git = "https://github.com/purestake/substrate", branch = "m sp-api = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sc-client-api = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sc-consensus = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "moonbeam-polkadot-v0.9.13" } substrate-prometheus-endpoint = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sp-keystore = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sp-application-crypto = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } -sp-std = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } # Polkadot dependencies polkadot-client = { git = "https://github.com/purestake/polkadot", branch = "moonbeam-polkadot-v0.9.13", default-features = false } diff --git a/nimbus-consensus/src/import_queue.rs b/nimbus-consensus/src/import_queue.rs index f770047b..6679166e 100644 --- a/nimbus-consensus/src/import_queue.rs +++ b/nimbus-consensus/src/import_queue.rs @@ -32,7 +32,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, DigestItem, }; -use nimbus_primitives::{NimbusId, NimbusPair, digests::CompatibleDigestItem}; +use nimbus_primitives::{NimbusId, NimbusPair, digests::CompatibleDigestItem, NIMBUS_ENGINE_ID}; use sp_application_crypto::{Pair as _, Public as _}; use log::debug; @@ -76,24 +76,20 @@ where debug!(target: crate::LOG_TARGET, "🪲 Signature according to verifier is {:?}", signature); - // Grab the digest from the runtime - //TODO use the trait. Maybe this code should move to the trait. 
- let consensus_digest = block_params.header + // Grab the author information from either the preruntime digest or the consensus digest + //TODO use the trait + let claimed_author = block_params.header .digest() .logs .iter() - .find(|digest| { + .find_map(|digest| { match *digest { - DigestItem::Consensus(id, _) if id == b"nmbs" => true, - _ => false, + DigestItem::Consensus(id, ref author_id) if id == NIMBUS_ENGINE_ID => Some(author_id.clone()), + DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => Some(author_id.clone()), + _ => None, } }) - .expect("A single consensus digest should be added by the runtime when executing the author inherent."); - - let claimed_author = match *consensus_digest { - DigestItem::Consensus(id, ref author_id) if id == *b"nmbs" => author_id.clone(), - _ => panic!("Expected consensus digest to contains author id bytes"), - }; + .expect("Expected one consensus or pre-runtime digest that contains author id bytes"); debug!(target: crate::LOG_TARGET, "🪲 Claimed Author according to verifier is {:?}", claimed_author); @@ -152,6 +148,9 @@ where block_params.post_digests.push(seal); + // The standard is to use the longest chain rule. This is overridden by the `NimbusBlockImport` in the parachain context. + block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + debug!(target: crate::LOG_TARGET, "🪲 Just finished verifier. 
posthash from params is {:?}", &block_params.post_hash()); Ok((block_params, None)) @@ -165,6 +164,7 @@ pub fn import_queue( create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&substrate_prometheus_endpoint::Registry>, + parachain: bool, ) -> ClientResult> where I: BlockImport + Send + Sync + 'static, @@ -181,11 +181,70 @@ where Ok(BasicQueue::new( verifier, - Box::new(cumulus_client_consensus_common::ParachainBlockImport::new( + Box::new(NimbusBlockImport::new( block_import, + parachain, )), None, spawner, registry, )) } + +/// Nimbus specific block import. +/// +/// Nimbus supports both parachain and non-parachain contexts. In the parachain +/// context, new blocks should not be imported as best. Cumulus's ParachainBlockImport +/// handles this correctly, but does not work in non-parachain contexts. +/// This block import has a field indicating whether we should apply parachain rules or not. +/// +/// There may be additional nimbus-specific logic here in the future, but for now it is +/// only the conditional parachain logic +pub struct NimbusBlockImport{ + inner: I, + parachain_context: bool, +} + +impl NimbusBlockImport { + /// Create a new instance. 
+ pub fn new(inner: I, parachain_context: bool) -> Self { + Self{ + inner, + parachain_context, + } + } +} + +#[async_trait::async_trait] +impl BlockImport for NimbusBlockImport +where + Block: BlockT, + I: BlockImport + Send, +{ + type Error = I::Error; + type Transaction = I::Transaction; + + async fn check_block( + &mut self, + block: sc_consensus::BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await + } + + async fn import_block( + &mut self, + mut block_import_params: sc_consensus::BlockImportParams, + cache: std::collections::HashMap>, + ) -> Result { + // If we are in the parachain context, best block is determined by the relay chain + // except during initial sync + if self.parachain_context { + block_import_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( + block_import_params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, + )); + } + + // Now continue on to the rest of the import pipeline. + self.inner.import_block(block_import_params, cache).await + } +} \ No newline at end of file diff --git a/nimbus-consensus/src/lib.rs b/nimbus-consensus/src/lib.rs index 88e5bb7d..751d75ba 100644 --- a/nimbus-consensus/src/lib.rs +++ b/nimbus-consensus/src/lib.rs @@ -33,20 +33,23 @@ use parking_lot::Mutex; use polkadot_client::ClientHandle; use sc_client_api::Backend; use sp_api::{ProvideRuntimeApi, BlockId, ApiExt}; +use sp_application_crypto::CryptoTypePublicPair; use sp_consensus::{ BlockOrigin, EnableProofRecording, Environment, ProofRecording, Proposal, Proposer, }; use sc_consensus::{BlockImport, BlockImportParams}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT, DigestItemFor}; use std::{marker::PhantomData, sync::Arc, time::Duration}; use tracing::error; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use 
sp_core::crypto::Public; -use sp_std::convert::TryInto; -use nimbus_primitives::{AuthorFilterAPI, NIMBUS_KEY_ID, NimbusId}; +use std::convert::TryInto; +use nimbus_primitives::{AuthorFilterAPI, NIMBUS_KEY_ID, NimbusId, NimbusApi, CompatibleDigestItem}; mod import_queue; +mod manual_seal; +pub use manual_seal::NimbusManualSealConsensusDataProvider; const LOG_TARGET: &str = "filtering-consensus"; @@ -153,6 +156,133 @@ where } } +/// Grabs any available nimbus key from the keystore. +/// This may be useful in situations where you expect exactly one key +/// and intend to perform an operation with it regardless of whether it is +/// expected to be eligible. Concretely, this is used in the consensus worker +/// to implement the `skip_prediction` feature. +pub(crate) fn first_available_key(keystore: &dyn SyncCryptoStore) -> Option { + // Get all the available keys + let available_keys = + SyncCryptoStore::keys(keystore, NIMBUS_KEY_ID) + .expect("keystore should return the keys it has"); + + // Print a more helpful message than "not eligible" when there are no keys at all. + if available_keys.is_empty() { + warn!(target: LOG_TARGET, "🔏 No Nimbus keys available. We will not be able to author."); + return None; + } + + Some(available_keys[0].clone()) +} + +/// Grab the first eligible nimbus key from the keystore +/// If multiple keys are eligible this function still only returns one +/// and makes no guarantees which one as that depends on the keystore's iterator behavior. +/// This is the standard way of determining which key to author with. 
+pub(crate) fn first_eligible_key(client: Arc, keystore: &dyn SyncCryptoStore, parent: &B::Header, slot_number: u32) -> Option +where + C: ProvideRuntimeApi, + C::Api: NimbusApi, + C::Api: AuthorFilterAPI, +{ + // Get all the available keys + let available_keys = + SyncCryptoStore::keys(keystore, NIMBUS_KEY_ID) + .expect("keystore should return the keys it has"); + + // Print a more helpful message than "not eligible" when there are no keys at all. + if available_keys.is_empty() { + warn!(target: LOG_TARGET, "🔏 No Nimbus keys available. We will not be able to author."); + return None; + }let at = BlockId::Hash(parent.hash()); + + // helper function for calling the various runtime apis and versions + let prediction_helper = |at, nimbus_id: NimbusId, slot: u32, parent| -> bool { + + let has_nimbus_api = client + .runtime_api() + .has_api::>(at) + .expect("should be able to dynamically detect the api"); + + if has_nimbus_api { + NimbusApi::can_author(&*client.runtime_api(), at, nimbus_id, slot, parent) + .expect("NimbusAPI should not return error") + } else { + // There are two versions of the author filter, so we do that dynamically also. + let api_version = client.runtime_api() + .api_version::>(&at) + .expect("Runtime api access to not error.") + .expect("Should be able to detect author filter version"); + + if api_version >= 2 { + AuthorFilterAPI::can_author(&*client.runtime_api(), at, nimbus_id, slot, parent) + .expect("Author API should not return error") + } else { + #[allow(deprecated)] + client.runtime_api().can_author_before_version_2( + &at, + nimbus_id, + slot_number, + ) + .expect("Author API version 2 should not return error") + } + } + }; + + // Iterate keys until we find an eligible one, or run out of candidates. + // If we are skipping prediction, then we author with the first key we find. + // prediction skipping only really makes sense when there is a single key in the keystore. 
+ let maybe_key = available_keys.into_iter().find(|type_public_pair| { + // Have to convert to a typed NimbusId to pass to the runtime API. Maybe this is a clue + // That I should be passing Vec across the wasm boundary? + prediction_helper( + &at, + NimbusId::from_slice(&type_public_pair.1), + slot_number, + parent, + ) + }); + + // If there are no eligible keys, print the log, and exit early. + if maybe_key.is_none() { + info!( + target: LOG_TARGET, + "🔮 Skipping candidate production because we are not eligible" + ); + } + + maybe_key +} + +pub(crate) fn seal_header(header: &B::Header, keystore: &dyn SyncCryptoStore, type_public_pair: &CryptoTypePublicPair) -> DigestItemFor +where + B: BlockT, +{ + let pre_hash = header.hash(); + + let raw_sig = SyncCryptoStore::sign_with( + &*keystore, + NIMBUS_KEY_ID, + type_public_pair, + pre_hash.as_ref(), + ) + .expect("Keystore should be able to sign") + .expect("We already checked that the key was present"); + + debug!( + target: LOG_TARGET, + "The signature is \n{:?}", raw_sig + ); + + let signature = raw_sig + .clone() + .try_into() + .expect("signature bytes produced by keystore should be right length"); + + as CompatibleDigestItem>::nimbus_seal(signature) +} + #[async_trait::async_trait] impl ParachainConsensus for NimbusConsensus @@ -179,74 +309,18 @@ where relay_parent: PHash, validation_data: &PersistedValidationData, ) -> Option> { - // Design decision: We will check the keystore for any available keys. Then we will iterate - // those keys until we find one that is eligible. If none are eligible, we skip this slot. - // If multiple are eligible, we only author with the first one. - - // Get allthe available keys - let available_keys = - SyncCryptoStore::keys(&*self.keystore, NIMBUS_KEY_ID) - .expect("keystore should return the keys it has"); - - // Print a more helpful message than "not eligible" when there are no keys at all. - if available_keys.is_empty() { - warn!(target: LOG_TARGET, "🔏 No Nimbus keys available. 
We will not be able to author."); - return None; - } - - let at = BlockId::Hash(parent.hash()); - // Get `AuthorFilterAPI` version. - let api_version = self.parachain_client.runtime_api() - .api_version::>(&at) - .expect("Runtime api access to not error."); - if api_version.is_none() { - tracing::error!( - target: LOG_TARGET, "Could not find `AuthorFilterAPI` version.", - ); - return None; + let maybe_key = if self.skip_prediction { + first_available_key(&*self.keystore) } - let api_version = api_version.unwrap(); - - // Iterate keys until we find an eligible one, or run out of candidates. - // If we are skipping prediction, then we author withthe first key we find. - // prediction skipping only really amkes sense when there is a single key in the keystore. - let maybe_key = available_keys.into_iter().find(|type_public_pair| { - - // If we are not predicting, just return the first one we find. - self.skip_prediction || - - // Have to convert to a typed NimbusId to pass to the runtime API. Maybe this is a clue - // That I should be passing Vec across the wasm boundary? - if api_version >= 2 { - self.parachain_client.runtime_api().can_author( - &at, - NimbusId::from_slice(&type_public_pair.1), - validation_data.relay_parent_number, - parent, - ) - .expect("Author API should not return error") - } else { - #[allow(deprecated)] - self.parachain_client.runtime_api().can_author_before_version_2( - &at, - NimbusId::from_slice(&type_public_pair.1), - validation_data.relay_parent_number, - ) - .expect("Author API version 2 should not return error") - } - }); + else { + first_eligible_key::(self.parachain_client.clone(), &*self.keystore, parent, validation_data.relay_parent_number) + }; // If there are no eligible keys, print the log, and exit early. 
let type_public_pair = match maybe_key { Some(p) => p, - None => { - info!( - target: LOG_TARGET, - "🔮 Skipping candidate production because we are not eligible" - ); - return None; - } + None => { return None; } }; let proposer_future = self.proposer_factory.lock().init(&parent); @@ -260,6 +334,12 @@ where let inherent_data = self.inherent_data(parent.hash(),&validation_data, relay_parent, NimbusId::from_slice(&type_public_pair.1)).await?; + let inherent_digests = sp_runtime::generic::Digest { + logs: vec![ + CompatibleDigestItem::nimbus_pre_digest(NimbusId::from_slice(&type_public_pair.1)), + ] + }; + let Proposal { block, storage_changes, @@ -267,7 +347,7 @@ where } = proposer .propose( inherent_data, - Default::default(), + inherent_digests, //TODO: Fix this. Duration::from_millis(500), // Set the block limit to 50% of the maximum PoV size. @@ -282,27 +362,7 @@ where let (header, extrinsics) = block.clone().deconstruct(); - let pre_hash = header.hash(); - - let raw_sig = SyncCryptoStore::sign_with( - &*self.keystore, - NIMBUS_KEY_ID, - &type_public_pair, - pre_hash.as_ref(), - ) - .expect("Keystore should be able to sign") - .expect("We already checked that the key was present"); - - debug!( - target: LOG_TARGET, - "The signature is \n{:?}", raw_sig - ); - - let signature = raw_sig - .clone() - .try_into().ok()?; - - let sig_digest = ::nimbus_seal(signature); + let sig_digest = seal_header::(&header, &*self.keystore, &type_public_pair); let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header.clone()); block_import_params.post_digests.push(sig_digest.clone()); @@ -316,7 +376,7 @@ where "🔖 Sealed block for proposal at {}. 
Hash now {:?}, previously {:?}.", *header.number(), block_import_params.post_hash(), - pre_hash, + header.hash(), ); if let Err(err) = self diff --git a/nimbus-consensus/src/manual_seal.rs b/nimbus-consensus/src/manual_seal.rs new file mode 100644 index 00000000..fe8efff8 --- /dev/null +++ b/nimbus-consensus/src/manual_seal.rs @@ -0,0 +1,120 @@ +// Copyright 2019-2021 PureStake Inc. +// This file is part of Nimbus. + +// Nimbus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Nimbus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Nimbus. If not, see . 
+ +use std::sync::Arc; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::{ + traits::{Block as BlockT, DigestFor}, + generic::{Digest, DigestItem}, +}; +use sp_core::crypto::Public; +use sc_consensus::BlockImportParams; +use sc_consensus_manual_seal::{ConsensusDataProvider, Error}; +use sp_api::{TransactionFor, ProvideRuntimeApi, HeaderT}; +use sp_inherents::InherentData; +use nimbus_primitives::{AuthorFilterAPI, NimbusApi, NimbusId, CompatibleDigestItem, NIMBUS_ENGINE_ID}; +use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDENTIFIER as PARACHAIN_INHERENT_IDENTIFIER}; + +/// Provides nimbus-compatible pre-runtime digests for use with manual seal consensus +pub struct NimbusManualSealConsensusDataProvider { + /// Shared reference to keystore + pub keystore: SyncCryptoStorePtr, + + /// Shared reference to the client + pub client: Arc, + + // Could have a skip_prediction field here if it becomes desireable +} + +impl ConsensusDataProvider for NimbusManualSealConsensusDataProvider +where + B: BlockT, + C: ProvideRuntimeApi + Send + Sync, + C::Api: NimbusApi, + C::Api: AuthorFilterAPI, + { + type Transaction = TransactionFor; + + fn create_digest( + &self, + parent: &B::Header, + inherents: &InherentData, + ) -> Result, Error> { + // Retrieve the relay chain block number to use as the slot number from the parachain inherent + let slot_number = inherents + .get_data::(&PARACHAIN_INHERENT_IDENTIFIER) + .expect("Parachain inherent should decode correctly") + .expect("Parachain inherent should be present because we are mocking it") + .validation_data + .relay_parent_number; + + // Fetch first eligible key from keystore + let maybe_key = crate::first_eligible_key::( + self.client.clone(), + &*self.keystore, + parent, + // For now we author all blocks in slot zero, which is consistent with how we are + // mocking the relay chain height which the runtime uses for slot beacon. + // This should improve. 
See https://github.com/PureStake/nimbus/issues/3 + slot_number, + ); + + // If we aren't eligible, return an appropriate error + match maybe_key { + Some(key) => { + Ok(Digest{ + logs: vec![DigestItem::nimbus_pre_digest(NimbusId::from_slice(&key.1))], + }) + }, + None => { + Err(Error::StringError(String::from("no nimbus keys available to manual seal"))) + }, + } + + } + + // This is where we actually sign with the nimbus key and attach the seal + fn append_block_import( + &self, + _parent: &B::Header, + params: &mut BlockImportParams, + _inherents: &InherentData, + ) -> Result<(), Error> { + + // We have to reconstruct the type-public pair which is only communicated through the pre-runtime digest + let claimed_author = params + .header + .digest() + .logs + .iter() + .find_map(|digest| { + match *digest { + // We do not support the older author inherent in manual seal + DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => Some(author_id.clone()), + _ => None, + } + }) + .expect("Expected one pre-runtime digest that contains author id bytes"); + + let nimbus_public = NimbusId::from_slice(&claimed_author); + + let sig_digest = crate::seal_header::(¶ms.header, &*self.keystore, &nimbus_public.into()); + + params.post_digests.push(sig_digest); + + Ok(()) + } +} diff --git a/nimbus-primitives/src/digests.rs b/nimbus-primitives/src/digests.rs index feab36d1..f37ea631 100644 --- a/nimbus-primitives/src/digests.rs +++ b/nimbus-primitives/src/digests.rs @@ -13,21 +13,37 @@ use parity_scale_codec::Encode; /// A digest item which is usable with aura consensus. 
pub trait CompatibleDigestItem: Sized { + /// Construct a pre-runtime digest from the given AuthorId + fn nimbus_pre_digest(author: NimbusId) -> Self; + + /// If this item is a nimbus pre-runtime digest, return the author + fn as_nimbus_pre_digest(&self) -> Option; + /// Construct a seal digest item from the given signature fn nimbus_seal(signature: NimbusSignature) -> Self; /// If this item is a nimbus seal, return the signature. fn as_nimbus_seal(&self) -> Option; + /// This will be deprecated in the future /// Construct a consensus digest from the given AuthorId fn nimbus_consensus_digest(author: NimbusId) -> Self; - /// If this item is a nimbus consensus digest, return the author + /// This will be deprecated in the future + /// If this item is a nimbus consensus digest, return the author fn as_nimbus_consensus_digest(&self) -> Option; } impl CompatibleDigestItem for DigestItem { + fn nimbus_pre_digest(author: NimbusId) -> Self { + DigestItem::PreRuntime(NIMBUS_ENGINE_ID, author.encode()) + } + + fn as_nimbus_pre_digest(&self) -> Option { + self.pre_runtime_try_to(&NIMBUS_ENGINE_ID) + } + fn nimbus_seal(signature: NimbusSignature) -> Self { DigestItem::Seal(NIMBUS_ENGINE_ID, signature.encode()) } @@ -36,10 +52,13 @@ impl CompatibleDigestItem for DigestItem self.seal_try_to(&NIMBUS_ENGINE_ID) } + // Remove this once deprecated fn nimbus_consensus_digest(author: NimbusId) -> Self { DigestItem::Consensus(NIMBUS_ENGINE_ID, author.encode()) } + // Remove this once deprecated. I don't think it is used anyway. + // Notice that it calls the pre_runtime helper function. 
fn as_nimbus_consensus_digest(&self) -> Option { self.pre_runtime_try_to(&NIMBUS_ENGINE_ID) } diff --git a/nimbus-primitives/src/lib.rs b/nimbus-primitives/src/lib.rs index 03b02107..6c76fbd6 100644 --- a/nimbus-primitives/src/lib.rs +++ b/nimbus-primitives/src/lib.rs @@ -30,6 +30,8 @@ use sp_runtime::traits::BlockNumberProvider; pub mod digests; mod inherents; +pub use digests::CompatibleDigestItem; + pub use inherents::{INHERENT_IDENTIFIER, InherentDataProvider}; /// The given account ID is the author of the current block. diff --git a/pallets/author-inherent/src/exec.rs b/pallets/author-inherent/src/exec.rs index 5849f579..f6ece255 100644 --- a/pallets/author-inherent/src/exec.rs +++ b/pallets/author-inherent/src/exec.rs @@ -57,34 +57,25 @@ where debug!(target: "executive", "In hacked Executive. digests after stripping {:?}", header.digest()); debug!(target: "executive", "The seal we got {:?}", seal); - // let sig = match seal { - // DigestItem::Seal(id, ref sig) if id == NIMBUS_ENGINE_ID => sig.clone(), - // _ => panic!("HeaderUnsealed"), - // }; let signature = seal.as_nimbus_seal().unwrap_or_else(||panic!("HeaderUnsealed")); debug!(target: "executive", "🪲 Header hash after popping digest {:?}", header.hash()); debug!(target: "executive", "🪲 Signature according to executive is {:?}", signature); - // Grab the digest from the runtime - //TODO use the CompatibleDigest trait. Maybe this code should move to the trait. 
- let consensus_digest = header + // Grab the author information from the preruntime digest + //TODO use the trait + let claimed_author = header .digest() .logs .iter() - .find(|digest| { + .find_map(|digest| { match *digest { - DigestItem::Consensus(id, _) if id == &NIMBUS_ENGINE_ID => true, - _ => false, + DigestItem::PreRuntime(id, ref author_id) if id == NIMBUS_ENGINE_ID => Some(author_id.clone()), + _ => None, } }) - .expect("A single consensus digest should be added by the runtime when executing the author inherent."); - - let claimed_author = match *consensus_digest { - DigestItem::Consensus(id, ref author_id) if id == NIMBUS_ENGINE_ID => author_id.clone(), - _ => panic!("Expected consensus digest to contains author id bytes"), - }; + .expect("Expected pre-runtime digest that contains author id bytes"); debug!(target: "executive", "🪲 Claimed Author according to executive is {:?}", claimed_author); diff --git a/pallets/author-inherent/src/lib.rs b/pallets/author-inherent/src/lib.rs index 775c119f..70d1f9b7 100644 --- a/pallets/author-inherent/src/lib.rs +++ b/pallets/author-inherent/src/lib.rs @@ -1,18 +1,18 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// Copyright 2019-2021 PureStake Inc. +// This file is part of Nimbus. -// Cumulus is free software: you can redistribute it and/or modify +// Nimbus is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Cumulus is distributed in the hope that it will be useful, +// Nimbus is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// along with Nimbus. If not, see . //! Pallet that allows block authors to include their identity in a block via an inherent. //! Currently the author does not _prove_ their identity, just states it. So it should not be used, @@ -20,16 +20,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{ - traits::FindAuthor, +use frame_support::traits::FindAuthor; +use nimbus_primitives::{ + AccountLookup, CanAuthor, EventHandler, SlotBeacon, INHERENT_IDENTIFIER, NIMBUS_ENGINE_ID, NimbusId, }; use parity_scale_codec::{Decode, Encode}; use sp_inherents::{InherentIdentifier, IsFatalError}; -use sp_runtime::{ - ConsensusEngineId, DigestItem, RuntimeString, RuntimeAppPublic, -}; -use log::debug; -use nimbus_primitives::{AccountLookup, CanAuthor, NIMBUS_ENGINE_ID, SlotBeacon, EventHandler, INHERENT_IDENTIFIER}; +use sp_runtime::{ConsensusEngineId, RuntimeString}; mod exec; pub use exec::BlockExecutor; @@ -48,15 +45,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - // This is copied from Aura. I wonder if I really need all those trait bounds. For now I'll leave them. - // TODO could I remove this type entirely and just always use NimbusId? Why didn't Aura do that? - /// The identifier type for an authority. - type AuthorId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize; - /// A type to convert between AuthorId and AccountId. This is useful when you want to associate /// Block authoring behavior with an AccoutId for rewards or slashing. If you do not need to /// hold an AccountID responsible for authoring use `()` which acts as an identity mapping. 
- type AccountLookup: AccountLookup; + type AccountLookup: AccountLookup; /// Other pallets that want to be informed about block authorship type EventHandler: EventHandler; @@ -72,14 +64,10 @@ pub mod pallet { type SlotBeacon: SlotBeacon; } - // If the AccountId type supports it, then this pallet can be BoundToRuntimeAppPublic - impl sp_runtime::BoundToRuntimeAppPublic for Pallet - where - T: Config, - T::AuthorId: RuntimeAppPublic, - { - type Public = T::AuthorId; + impl sp_runtime::BoundToRuntimeAppPublic for Pallet { + type Public = NimbusId; } + #[pallet::error] pub enum Error { /// Author already set in block. @@ -90,7 +78,6 @@ pub mod pallet { CannotBeAuthor, } - /// Author of current block. #[pallet::storage] pub type Author = StorageValue<_, T::AccountId, OptionQuery>; @@ -98,44 +85,45 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_: T::BlockNumber) -> Weight { + // Start by clearing out the previous block's author >::kill(); - 0 + + // Now extract the author from the digest + let digest = >::digest(); + + let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); + Self::find_author(pre_runtime_digests).map(|author_account|{ + // Store the author so we can confirm eligibility after the inherents have executed + >::put(&author_account); + + //TODO, should we reuse the same trait that Pallet Authorship uses? + // Notify any other pallets that are listening (eg rewards) about the author + T::EventHandler::note_author(author_account); + }); + + T::DbWeight::get().write * 2 } } #[pallet::call] impl Pallet { - /// Inherent to set the author of a block - #[pallet::weight((0, DispatchClass::Mandatory))] - pub fn set_author(origin: OriginFor, author: T::AuthorId) -> DispatchResult { - + /// This inherent is a workaround to run code after the "real" inherents have executed, + /// but before transactions are executed. 
+ /// This should go into on_post_inherents when it is ready https://github.com/paritytech/substrate/pull/10128 + /// TODO better weight. For now we just set a somewhat conservative fudge factor + #[pallet::weight((10 * T::DbWeight::get().write, DispatchClass::Mandatory))] + pub fn kick_off_authorship_validation(origin: OriginFor) -> DispatchResultWithPostInfo { ensure_none(origin)?; - ensure!(>::get().is_none(), Error::::AuthorAlreadySet); - debug!(target: "author-inherent", "Author was not already set"); - - let slot = T::SlotBeacon::slot(); - debug!(target: "author-inherent", "Slot is {:?}", slot); - - let account = T::AccountLookup::lookup_account(&author).ok_or( - Error::::NoAccountId - )?; - - ensure!(T::CanAuthor::can_author(&account, &slot), Error::::CannotBeAuthor); - - // Update storage - Author::::put(&account); - - // Add a consensus digest so the client-side worker can verify the block is signed by the right person. - frame_system::Pallet::::deposit_log(DigestItem::Consensus( - NIMBUS_ENGINE_ID, - author.encode(), - )); - - // Notify any other pallets that are listening (eg rewards) about the author - T::EventHandler::note_author(account); + let author = >::get() + .expect("Block invalid, no authorship information supplied in preruntime digest."); + + assert!( + T::CanAuthor::can_author(&author, &T::SlotBeacon::slot()), + "Block invalid, supplied author is not eligible." + ); - Ok(()) + Ok(Pays::No.into()) } } @@ -149,43 +137,46 @@ pub mod pallet { // Return Ok(Some(_)) unconditionally because this inherent is required in every block // If it is not found, throw an AuthorInherentRequired error. 
Ok(Some(InherentError::Other( - sp_runtime::RuntimeString::Borrowed("AuthorInherentRequired"), + sp_runtime::RuntimeString::Borrowed("Inherent required to manually initiate author validation"), ))) } - fn create_inherent(data: &InherentData) -> Option { - let author_raw = data - .get_data::(&INHERENT_IDENTIFIER); - - debug!("In create_inherent (runtime side). data is"); - debug!("{:?}", author_raw); - - let author = author_raw - .expect("Gets and decodes authorship inherent data")?; - - Some(Call::set_author{author}) + // Regardless of whether the client is still supplying the author id, + // we will create the new empty-payload inherent extrinsic. + fn create_inherent(_data: &InherentData) -> Option { + Some(Call::kick_off_authorship_validation{}) } fn is_inherent(call: &Self::Call) -> bool { - matches!(call, Call::set_author{..}) + matches!(call, Call::kick_off_authorship_validation{..}) } } impl FindAuthor for Pallet { - fn find_author<'a, I>(_digests: I) -> Option + fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator, { - // We don't use the digests at all. - // This will only return the correct author _after_ the authorship inherent is processed. - >::get() + for (id, mut data) in digests.into_iter() { + if id == NIMBUS_ENGINE_ID { + let author_id = NimbusId::decode(&mut data) + .expect("NimbusId encoded in preruntime digest must be valid"); + + let author_account = T::AccountLookup::lookup_account(&author_id) + .expect("No Account Mapped to this NimbusId"); + + return Some(author_account); + } + } + + None } } - /// To learn whether a given AuthorId can author, you call the author-inherent directly. - /// It will do the mapping lookup. - impl CanAuthor for Pallet { - fn can_author(author: &T::AuthorId, slot: &u32) -> bool { + /// To learn whether a given NimbusId can author, as opposed to an account id, you + /// can ask this pallet directly. It will do the mapping for you. 
+ impl CanAuthor for Pallet { + fn can_author(author: &NimbusId, slot: &u32) -> bool { let account = match T::AccountLookup::lookup_account(&author) { Some(account) => account, // Authors whose account lookups fail will not be eligible @@ -225,7 +216,6 @@ impl InherentError { } } - #[cfg(test)] mod tests { use super::*; @@ -235,14 +225,13 @@ mod tests { assert_noop, assert_ok, parameter_types, traits::{OnFinalize, OnInitialize}, }; + use sp_core::Public; use sp_core::H256; use sp_io::TestExternalities; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; - use nimbus_primitives::NimbusId; - use sp_core::Public; const TEST_AUTHOR_ID: [u8; 32] = [0u8; 32]; const BOGUS_AUTHOR_ID: [u8; 32] = [1u8; 32]; diff --git a/parachain-template/node/Cargo.toml b/parachain-template/node/Cargo.toml index f9e9a659..90561303 100644 --- a/parachain-template/node/Cargo.toml +++ b/parachain-template/node/Cargo.toml @@ -50,6 +50,7 @@ substrate-prometheus-endpoint = { git = "https://github.com/purestake/substrate" ## Substrate Client Dependencies sc-basic-authorship = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sc-chain-spec = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "moonbeam-polkadot-v0.9.13" } sc-cli = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sc-client-api = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } sc-consensus = { git = "https://github.com/purestake/substrate", branch = "moonbeam-polkadot-v0.9.13" } diff --git a/parachain-template/node/src/chain_spec.rs b/parachain-template/node/src/chain_spec.rs index 9878e14b..6f48700f 100644 --- a/parachain-template/node/src/chain_spec.rs +++ b/parachain-template/node/src/chain_spec.rs @@ -72,10 +72,6 @@ pub fn development_config(id: ParaId) -> 
ChainSpec { get_account_id_from_seed::("Alice"), get_collator_keys_from_seed("Alice"), ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), ], vec![ get_account_id_from_seed::("Alice"), diff --git a/parachain-template/node/src/cli.rs b/parachain-template/node/src/cli.rs index 319893a0..fe5b1e58 100644 --- a/parachain-template/node/src/cli.rs +++ b/parachain-template/node/src/cli.rs @@ -34,6 +34,9 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), + /// Run Instant Seal + RunInstantSeal(sc_cli::RunCmd), + /// The custom benchmark subcommmand benchmarking runtime pallets. #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), diff --git a/parachain-template/node/src/command.rs b/parachain-template/node/src/command.rs index 4083f5ae..08bb9a7e 100644 --- a/parachain-template/node/src/command.rs +++ b/parachain-template/node/src/command.rs @@ -126,7 +126,8 @@ macro_rules! construct_async_run { RuntimeApi, TemplateRuntimeExecutor, >( - &$config, + // We default to the non-parachain import queue and select chain. + &$config, false, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -242,6 +243,13 @@ pub fn run() -> Result<()> { You can enable it with `--features runtime-benchmarks`." 
.into()) }, + Some(Subcommand::RunInstantSeal(run_cmd)) => { + let runner = cli.create_runner(run_cmd)?; + runner.run_node_until_exit(|config| async move { + crate::service::start_instant_seal_node(config) + .map_err(sc_cli::Error::Service) + }) + }, None => { let runner = cli.create_runner(&cli.run.normalize())?; diff --git a/parachain-template/node/src/service.rs b/parachain-template/node/src/service.rs index fdca22e4..769e3268 100644 --- a/parachain-template/node/src/service.rs +++ b/parachain-template/node/src/service.rs @@ -9,7 +9,7 @@ use parachain_template_runtime::{ }; use nimbus_consensus::{ - build_nimbus_consensus, BuildNimbusConsensusParams, + build_nimbus_consensus, BuildNimbusConsensusParams, NimbusManualSealConsensusDataProvider, }; // Cumulus Imports @@ -19,6 +19,7 @@ use cumulus_client_service::{ prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, }; use cumulus_primitives_core::ParaId; +use cumulus_primitives_parachain_inherent::MockValidationDataInherentDataProvider; // Substrate Imports use sc_executor::NativeElseWasmExecutor; @@ -29,6 +30,7 @@ use sp_api::ConstructRuntimeApi; use sp_keystore::SyncCryptoStorePtr; use sp_runtime::traits::BlakeTwo256; use substrate_prometheus_endpoint::Registry; +use sc_consensus_manual_seal::{run_instant_seal, InstantSealParams}; /// Native executor instance. pub struct TemplateRuntimeExecutor; @@ -52,11 +54,12 @@ impl sc_executor::NativeExecutionDispatch for TemplateRuntimeExecutor { #[allow(clippy::type_complexity)] pub fn new_partial( config: &Configuration, + parachain: bool, ) -> Result< PartialComponents< TFullClient>, TFullBackend, - (), + sc_consensus::LongestChain, Block>, sc_consensus::DefaultImportQueue< Block, TFullClient>, @@ -117,6 +120,10 @@ where telemetry }); + // Although this will not be used by the parachain collator, it will be used by the instant seal + // And sovereign nodes, so we create it anyway. 
+ let select_chain = sc_consensus::LongestChain::new(backend.clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( config.transaction_pool.clone(), config.role.is_authority().into(), @@ -135,6 +142,7 @@ where }, &task_manager.spawn_essential_handle(), config.prometheus_registry().clone(), + parachain, )?; let params = PartialComponents { @@ -144,7 +152,7 @@ where keystore_container, task_manager, transaction_pool, - select_chain: (), + select_chain, other: (telemetry, telemetry_worker_handle), }; @@ -211,7 +219,7 @@ where let parachain_config = prepare_node_config(parachain_config); - let params = new_partial::(¶chain_config)?; + let params = new_partial::(¶chain_config, true)?; let (mut telemetry, telemetry_worker_handle) = params.other; let relay_chain_full_node = @@ -400,3 +408,118 @@ pub async fn start_parachain_node( ) .await } + +/// Builds a new service for a full client. +pub fn start_instant_seal_node(config: Configuration) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (mut telemetry, _), + } = new_partial::(&config, false)?; + + let (network, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + warp_sync: None, + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); + } + + let is_authority = config.role.is_authority(); + let prometheus_registry = config.prometheus_registry().cloned(); + + let rpc_extensions_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let 
deps = crate::rpc::FullDeps { + client: client.clone(), + pool: transaction_pool.clone(), + deny_unsafe, + }; + + Ok(crate::rpc::create_full(deps)) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network, + client: client.clone(), + keystore: keystore_container.sync_keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder, + on_demand: None, + remote_blockchain: None, + backend, + system_rpc_tx, + config, + telemetry: telemetry.as_mut(), + })?; + + if is_authority { + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + ); + + let authorship_future = run_instant_seal(InstantSealParams { + block_import: client.clone(), + env: proposer, + client: client.clone(), + pool: transaction_pool.clone(), + select_chain, + consensus_data_provider: Some(Box::new( + NimbusManualSealConsensusDataProvider{ + keystore: keystore_container.sync_keystore(), + client: client.clone() + } + )), + create_inherent_data_providers: |_block, _extra_args| { + async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + // The nimbus runtime is shared among all nodes including the parachain node. + // Because this is not a parachain context, we need to mock the parachain inherent data provider. + //TODO might need to go back and get the block number like how I do in Moonbeam + let mocked_parachain = MockValidationDataInherentDataProvider { + current_para_block: 0, + relay_offset: 0, + relay_blocks_per_para_block: 0, + }; + + Ok((time, mocked_parachain)) + } + } + }); + + task_manager + .spawn_essential_handle() + .spawn_blocking("instant-seal", authorship_future); + }; + + network_starter.start_network(); + Ok(task_manager) +}