diff --git a/Cargo.lock b/Cargo.lock index ff6e3c06313f1..df96a2140abee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4786,17 +4786,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive-syn-parse" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" -dependencies = [ - "proc-macro2 1.0.82", - "quote 1.0.35", - "syn 1.0.109", -] - [[package]] name = "derive-syn-parse" version = "0.2.0" @@ -4980,7 +4969,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse 0.2.0", + "derive-syn-parse", "once_cell", "proc-macro2 1.0.82", "quote 1.0.35", @@ -6005,7 +5994,7 @@ version = "23.0.0" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse 0.2.0", + "derive-syn-parse", "expander", "frame-support-procedural-tools", "itertools 0.11.0", @@ -8431,9 +8420,9 @@ dependencies = [ [[package]] name = "macro_magic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03844fc635e92f3a0067e25fa4bf3e3dbf3f2927bf3aa01bb7bc8f1c428949d" +checksum = "cc33f9f0351468d26fbc53d9ce00a096c8522ecb42f19b50f34f2c422f76d21d" dependencies = [ "macro_magic_core", "macro_magic_macros", @@ -8443,12 +8432,12 @@ dependencies = [ [[package]] name = "macro_magic_core" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" +checksum = "1687dc887e42f352865a393acae7cf79d98fab6351cde1f58e9e057da89bf150" dependencies = [ "const-random", - "derive-syn-parse 0.1.5", + "derive-syn-parse", "macro_magic_core_macros", "proc-macro2 1.0.82", "quote 1.0.35", @@ -8457,9 +8446,9 @@ dependencies = [ [[package]] name = "macro_magic_core_macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" +checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -8468,9 +8457,9 @@ dependencies = [ [[package]] name = "macro_magic_macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" +checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote 1.0.35", @@ -20522,6 +20511,7 @@ dependencies = [ "sp-tracing 16.0.0", "sp-weights", "substrate-test-runtime-client", + "tracing", "zstd 0.12.4", ] @@ -21498,7 +21488,6 @@ dependencies = [ "frame-system", "frame-system-rpc-runtime-api", "futures", - "hex-literal", "log", "pallet-babe", "pallet-balances", @@ -21536,6 +21525,7 @@ dependencies = [ "sp-version", "substrate-test-runtime-client", "substrate-wasm-builder", + "tracing", "trie-db", ] diff --git a/Cargo.toml b/Cargo.toml index db9a2bd722735..5c2677fffeb22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -819,7 +819,7 @@ linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } litep2p = { version = "0.6.2" } log = { version = "0.4.21", default-features = false } -macro_magic = { version = "0.5.0" } +macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } memmap2 = { 
version = "0.9.3" } memory-db = { version = "0.32.0", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 836939f1af4de..e8772c0b48303 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -118,7 +118,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 14cae3b53967f..55c8a9f0b265d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 9aa1eeb1a4bbd..57ed5ec258d2c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -214,7 +214,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 5d4e41bd706db..05d6cdfd691e6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -189,7 +189,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 909d5dc60942f..f37af88c28436 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -122,7 +122,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index ab779b31c72e1..d2fe0689f5155 
100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -142,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 17030da51ece8..6e36539c7bf79 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -144,7 +144,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 1bd7e143c1b87..74fdd971f5ce0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -143,7 +143,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index b8a328c3db696..a204bb7276cfe 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 63dd863a861b6..ff31aba8a2771 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 84655972b0cd6..6adaa4b4e5020 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - 
spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index bf45b437f8bb0..40f2b78ffd6d5 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs index 7c01e34f9a03c..d06354dda2205 100644 --- a/cumulus/polkadot-parachain/src/cli.rs +++ b/cumulus/polkadot-parachain/src/cli.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use crate::common::NodeExtraArgs; use clap::{Command, CommandFactory, FromArgMatches}; use sc_cli::SubstrateCli; use std::path::PathBuf; @@ -94,6 +95,12 @@ pub struct Cli { pub relay_chain_args: Vec, } +impl Cli { + pub(crate) fn node_extra_args(&self) -> NodeExtraArgs { + NodeExtraArgs { use_slot_based_consensus: self.experimental_use_slot_based } + } +} + #[derive(Debug)] pub struct RelayChainCli { /// The actual relay chain cli object. diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 323216f300d85..fcf6c06f42227 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -14,15 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +#[cfg(feature = "runtime-benchmarks")] +use crate::service::Block; use crate::{ chain_spec, chain_spec::GenericChainSpec, cli::{Cli, RelayChainCli, Subcommand}, + common::NodeExtraArgs, fake_runtime_api::{ - asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi, + asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, + aura::RuntimeApi as AuraRuntimeApi, }, - service::{new_partial, Block, Hash}, + service::{new_aura_node_spec, DynNodeSpec, ShellNode}, }; +#[cfg(feature = "runtime-benchmarks")] use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; @@ -34,6 +39,8 @@ use sc_cli::{ }; use sc_service::config::{BasePath, PrometheusConfig}; use sp_runtime::traits::AccountIdConversion; +#[cfg(feature = "runtime-benchmarks")] +use sp_runtime::traits::HashingFor; use std::{net::SocketAddr, path::PathBuf}; /// The choice of consensus for the parachain omni-node. @@ -110,6 +117,7 @@ fn runtime(id: &str) -> Runtime { } else if id.starts_with("asset-hub-kusama") | id.starts_with("statemine") | id.starts_with("asset-hub-rococo") | + id.starts_with("rockmine") | id.starts_with("asset-hub-westend") | id.starts_with("westmint") { @@ -378,146 +386,27 @@ impl SubstrateCli for RelayChainCli { } } -/// Creates partial components for the runtimes that are supported by the benchmarks. -macro_rules! construct_partials { - ($config:expr, |$partials:ident| $code:expr) => { - match $config.chain_spec.runtime()? 
{ - Runtime::AssetHubPolkadot => { - let $partials = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, - )?; - $code - }, - Runtime::AssetHub | - Runtime::BridgeHub(_) | - Runtime::Collectives | - Runtime::Coretime(_) | - Runtime::People(_) => { - let $partials = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { - let $partials = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - $code - }, - Runtime::ContractsRococo | Runtime::Penpal(_) => { - let $partials = new_partial::( - &$config, - crate::service::build_aura_import_queue, - )?; - $code - }, - Runtime::Omni(consensus) => match consensus { - Consensus::Aura => { - let $partials = new_partial::( - &$config, - crate::service::build_aura_import_queue, - )?; - $code - }, - Consensus::Relay => { - let $partials = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - $code - }, - }, - } - }; -} - -macro_rules! construct_async_run { - (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ - let runner = $cli.create_runner($cmd)?; - match runner.config().chain_spec.runtime()? { - Runtime::AssetHubPolkadot => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHub | - Runtime::BridgeHub(_) | - Runtime::Collectives | - Runtime::Coretime(_) | - Runtime::People(_) => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Shell | - Runtime::Seedling | - Runtime::Glutton => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } - Runtime::ContractsRococo | Runtime::Penpal(_) => { - runner.async_run(|$config| { - let $components = new_partial::< - RuntimeApi, - _, - >( - &$config, - crate::service::build_aura_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Omni(consensus) => match consensus { - Consensus::Aura => { - runner.async_run(|$config| { - let $components = new_partial::< - RuntimeApi, - _, - >( - &$config, - crate::service::build_aura_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Consensus::Relay - => { - runner.async_run(|$config| { - let $components = new_partial::< - RuntimeApi, - _, - >( - &$config, - crate::service::build_shell_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - } - } - }} +fn new_node_spec( + config: &sc_service::Configuration, + extra_args: NodeExtraArgs, +) -> std::result::Result, sc_cli::Error> { + Ok(match config.chain_spec.runtime()? 
{ + Runtime::AssetHubPolkadot => + new_aura_node_spec::(extra_args), + Runtime::AssetHub | + Runtime::BridgeHub(_) | + Runtime::Collectives | + Runtime::Coretime(_) | + Runtime::People(_) | + Runtime::ContractsRococo | + Runtime::Glutton | + Runtime::Penpal(_) => new_aura_node_spec::(extra_args), + Runtime::Shell | Runtime::Seedling => Box::new(ShellNode), + Runtime::Omni(consensus) => match consensus { + Consensus::Aura => new_aura_node_spec::(extra_args), + Consensus::Relay => Box::new(ShellNode), + }, + }) } /// Parse command line arguments into service configuration. @@ -530,28 +419,40 @@ pub fn run() -> Result<()> { runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) }, Some(Subcommand::CheckBlock(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_check_block_cmd(config, cmd) }) }, Some(Subcommand::ExportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, config.database)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_export_blocks_cmd(config, cmd) }) }, Some(Subcommand::ExportState(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, config.chain_spec)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_export_state_cmd(config, cmd) }) }, Some(Subcommand::ImportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_import_blocks_cmd(config, cmd) + }) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_revert_cmd(config, cmd) }) }, - Some(Subcommand::Revert(cmd)) => construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.backend, None)) - }), Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; let polkadot_cli = RelayChainCli::new(runner.config(), cli.relay_chain_args.iter()); @@ -569,8 +470,10 @@ pub fn run() -> Result<()> { }, Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; - runner - .sync_run(|config| construct_partials!(config, |partials| cmd.run(partials.client))) + runner.sync_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_export_genesis_head_cmd(config, cmd) + }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { let runner = cli.create_runner(cmd)?; @@ -584,40 +487,28 @@ pub fn run() -> Result<()> { // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => - if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run_with_spec::, ReclaimHostFunctions>(Some(config.chain_spec))) - } else { - Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." 
- .into()) - }, + #[cfg(feature = "runtime-benchmarks")] + BenchmarkCmd::Pallet(cmd) => runner.sync_run(|config| { + cmd.run_with_spec::, ReclaimHostFunctions>(Some( + config.chain_spec, + )) + }), BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - construct_partials!(config, |partials| cmd.run(partials.client)) + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_benchmark_block_cmd(config, cmd) }), - #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => - return Err(sc_cli::Error::Input( - "Compile with --features=runtime-benchmarks \ - to enable storage benchmarks." - .into(), - ) - .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - construct_partials!(config, |partials| { - let db = partials.backend.expose_db(); - let storage = partials.backend.expose_storage(); - - cmd.run(config, partials.client.clone(), db, storage) - }) + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_benchmark_storage_cmd(config, cmd) }), BenchmarkCmd::Machine(cmd) => runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), - // NOTE: this allows the Client to leniently implement - // new benchmark commands without requiring a companion MR. #[allow(unreachable_patterns)] - _ => Err("Benchmarking sub-command unsupported".into()), + _ => Err("Benchmarking sub-command unsupported or compilation feature missing. \ + Make sure to compile with --features=runtime-benchmarks \ + to enable all supported benchmarks." + .into()), } }, Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), @@ -645,25 +536,33 @@ pub fn run() -> Result<()> { if old_path.exists() && new_path.exists() { return Err(format!( - "Found legacy {} path {} and new asset-hub path {}. Delete one path such that only one exists.", - old_name, old_path.display(), new_path.display() - ).into()) + "Found legacy {} path {} and new Asset Hub path {}. \ + Delete one path such that only one exists.", + old_name, + old_path.display(), + new_path.display() + ) + .into()) } if old_path.exists() { std::fs::rename(old_path.clone(), new_path.clone())?; info!( - "Statemint renamed to Asset Hub. The filepath with associated data on disk has been renamed from {} to {}.", - old_path.display(), new_path.display() + "{} was renamed to Asset Hub. 
The filepath with associated data on disk \ + has been renamed from {} to {}.", + old_name, + old_path.display(), + new_path.display() ); } } - let hwbench = (!cli.no_hardware_benchmarks).then_some( - config.database.path().map(|database_path| { + let hwbench = (!cli.no_hardware_benchmarks) + .then_some(config.database.path().map(|database_path| { let _ = std::fs::create_dir_all(database_path); sc_sysinfo::gather_hwbench(Some(database_path)) - })).flatten(); + })) + .flatten(); let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) .map(|e| e.para_id) @@ -672,7 +571,9 @@ pub fn run() -> Result<()> { let id = ParaId::from(para_id); let parachain_account = - AccountIdConversion::::into_account_truncating(&id); + AccountIdConversion::::into_account_truncating( + &id, + ); let tokio_handle = config.tokio_handle.clone(); let polkadot_config = @@ -683,209 +584,34 @@ pub fn run() -> Result<()> { info!("๐Ÿงพ Parachain Account: {}", parachain_account); info!("โœ๏ธ Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - match config.network.network_backend { - sc_network::config::NetworkBackendType::Libp2p => - start_node::>( - config, - polkadot_config, - collator_options, - id, - cli.experimental_use_slot_based, - hwbench, - ) - .await, - sc_network::config::NetworkBackendType::Litep2p => - start_node::( - config, - polkadot_config, - collator_options, - id, - cli.experimental_use_slot_based, - hwbench, - ) - .await, - } + start_node( + config, + polkadot_config, + collator_options, + id, + cli.node_extra_args(), + hwbench, + ) + .await }) }, } } -async fn start_node>( +#[sc_tracing::logging::prefix_logs_with("Parachain")] +async fn start_node( config: sc_service::Configuration, polkadot_config: sc_service::Configuration, collator_options: cumulus_client_cli::CollatorOptions, id: ParaId, - use_experimental_slot_based: bool, + extra_args: NodeExtraArgs, hwbench: Option, ) -> Result { - match config.chain_spec.runtime()? 
{ - Runtime::AssetHubPolkadot => - crate::service::start_asset_hub_async_backing_node::< - AssetHubPolkadotRuntimeApi, - AssetHubPolkadotAuraId, - Network, - >(config, polkadot_config, collator_options, id, use_experimental_slot_based, hwbench) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::AssetHub | Runtime::Collectives => - crate::service::start_generic_aura_async_backing_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::Seedling | Runtime::Shell => crate::service::start_shell_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) + let node_spec = new_node_spec(&config, extra_args)?; + node_spec + .start_node(config, polkadot_config, collator_options, id, hwbench) .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::ContractsRococo => crate::service::start_contracts_rococo_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => - crate::service::start_generic_aura_async_backing_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - - Runtime::Coretime(coretime_runtime_type) => match coretime_runtime_type { - chain_spec::coretime::CoretimeRuntimeType::Kusama | - chain_spec::coretime::CoretimeRuntimeType::KusamaLocal | - chain_spec::coretime::CoretimeRuntimeType::Polkadot | - chain_spec::coretime::CoretimeRuntimeType::PolkadotLocal | - chain_spec::coretime::CoretimeRuntimeType::Rococo | - chain_spec::coretime::CoretimeRuntimeType::RococoLocal | - chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment | - chain_spec::coretime::CoretimeRuntimeType::Westend | - chain_spec::coretime::CoretimeRuntimeType::WestendLocal | - chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_async_backing_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - - Runtime::Penpal(_) => crate::service::start_rococo_parachain_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::Glutton => crate::service::start_basic_async_backing_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::People(people_runtime_type) => match people_runtime_type { - chain_spec::people::PeopleRuntimeType::Kusama | - chain_spec::people::PeopleRuntimeType::KusamaLocal | - 
chain_spec::people::PeopleRuntimeType::Polkadot | - chain_spec::people::PeopleRuntimeType::PolkadotLocal | - chain_spec::people::PeopleRuntimeType::Rococo | - chain_spec::people::PeopleRuntimeType::RococoLocal | - chain_spec::people::PeopleRuntimeType::RococoDevelopment | - chain_spec::people::PeopleRuntimeType::Westend | - chain_spec::people::PeopleRuntimeType::WestendLocal | - chain_spec::people::PeopleRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_async_backing_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - Runtime::Omni(consensus) => match consensus { - // rococo actually uses aura import and consensus, unlike most system chains that use - // relay to aura. - Consensus::Aura => crate::service::start_rococo_parachain_node::( - config, - polkadot_config, - collator_options, - id, - use_experimental_slot_based, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - Consensus::Relay => crate::service::start_shell_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - }, - } + .map_err(Into::into) } impl DefaultConfigurationValues for RelayChainCli { diff --git a/cumulus/polkadot-parachain/src/common/mod.rs b/cumulus/polkadot-parachain/src/common/mod.rs index 5adbb4137cd3d..9f5febafe3042 100644 --- a/cumulus/polkadot-parachain/src/common/mod.rs +++ b/cumulus/polkadot-parachain/src/common/mod.rs @@ -65,3 +65,8 @@ where { type BoundedRuntimeApi = T::RuntimeApi; } + +/// Extra args that are passed when creating a new node spec. +pub struct NodeExtraArgs { + pub use_slot_based_consensus: bool, +} diff --git a/cumulus/polkadot-parachain/src/rpc.rs b/cumulus/polkadot-parachain/src/rpc.rs index 7437bb1f4b937..283a73d931d76 100644 --- a/cumulus/polkadot-parachain/src/rpc.rs +++ b/cumulus/polkadot-parachain/src/rpc.rs @@ -18,91 +18,82 @@ #![warn(missing_docs)] -use std::sync::Arc; - +use crate::{ + common::ConstructNodeRuntimeApi, + service::{ParachainBackend, ParachainClient}, +}; +use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use parachains_common::{AccountId, Balance, Block, Nonce}; -use sc_client_api::AuxStore; -pub use sc_rpc::DenyUnsafe; -use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sc_rpc::{ + dev::{Dev, DevApiServer}, + DenyUnsafe, +}; +use std::{marker::PhantomData, sync::Arc}; +use substrate_frame_rpc_system::{System, SystemApiServer}; +use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; /// A type representing all RPC extensions. pub type RpcExtension = jsonrpsee::RpcModule<()>; -/// Full client dependencies -pub struct FullDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc

, - /// Whether to deny unsafe calls - pub deny_unsafe: DenyUnsafe, +pub(crate) trait BuildRpcExtensions { + fn build_rpc_extensions( + deny_unsafe: DenyUnsafe, + client: Arc, + backend: Arc, + pool: Arc, + ) -> sc_service::error::Result; } -/// Instantiate all RPC extensions. -pub fn create_full( - deps: FullDeps, - backend: Arc, -) -> Result> +pub(crate) struct BuildEmptyRpcExtensions(PhantomData); + +impl + BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > for BuildEmptyRpcExtensions where - C: ProvideRuntimeApi - + HeaderBackend - + AuxStore - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + Sync + Send + 'static, - B: sc_client_api::Backend + Send + Sync + 'static, - B::State: sc_client_api::backend::StateBackend>, + RuntimeApi: ConstructNodeRuntimeApi> + Send + Sync + 'static, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; - - let mut module = RpcExtension::new(()); - let FullDeps { client, pool, deny_unsafe } = deps; - - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(StateMigration::new(client, backend, deny_unsafe).into_rpc())?; - - Ok(module) + fn build_rpc_extensions( + _deny_unsafe: DenyUnsafe, + _client: Arc>, + _backend: Arc, + _pool: Arc>>, + ) -> sc_service::error::Result { + Ok(RpcExtension::new(())) + } } -/// Instantiate all RPCs we want at the contracts-rococo chain. 
-pub fn create_contracts_rococo( - deps: FullDeps, -) -> Result> +pub(crate) struct BuildParachainRpcExtensions(PhantomData); + +impl + BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > for BuildParachainRpcExtensions where - C: ProvideRuntimeApi - + sc_client_api::BlockBackend - + HeaderBackend - + AuxStore - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + Sync + Send + 'static, + RuntimeApi: ConstructNodeRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use sc_rpc::dev::{Dev, DevApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - - let mut module = RpcExtension::new(()); - let FullDeps { client, pool, deny_unsafe } = deps; - - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(Dev::new(client, deny_unsafe).into_rpc())?; - - Ok(module) + fn build_rpc_extensions( + deny_unsafe: DenyUnsafe, + client: Arc>, + backend: Arc, + pool: Arc>>, + ) -> sc_service::error::Result { + let build = || -> Result> { + let mut module = RpcExtension::new(()); + + module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client.clone()).into_rpc())?; + module.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?; + module.merge(Dev::new(client, deny_unsafe).into_rpc())?; + + Ok(module) + }; + build().map_err(Into::into) + } } diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 0f2aed8ee4d85..f5f6189d1f0d6 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
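The rpc.rs hunk above replaces the closure-style `create_full`/`create_contracts_rococo` helpers with `BuildRpcExtensions` implementors that a node spec can name as a type. Below is a minimal, self-contained sketch of that pattern; the `Client`/`Backend`/`Pool` stand-ins and the string-based module are placeholders for illustration, not the real polkadot-parachain types, and the `DenyUnsafe` flag is elided.

// Editor sketch (illustration only, not part of the diff).
use std::sync::Arc;

struct Client;
struct Backend;
struct Pool;

/// Stand-in for jsonrpsee::RpcModule<()>: just a list of merged endpoint names.
type RpcExtension = Vec<&'static str>;

/// RPC construction becomes a type-level hook instead of a closure argument.
trait BuildRpcExtensions<C, B, P> {
    fn build_rpc_extensions(client: Arc<C>, backend: Arc<B>, pool: Arc<P>)
        -> Result<RpcExtension, String>;
}

/// Shell-like chains expose no custom RPCs.
struct BuildEmptyRpcExtensions;

impl BuildRpcExtensions<Client, Backend, Pool> for BuildEmptyRpcExtensions {
    fn build_rpc_extensions(_: Arc<Client>, _: Arc<Backend>, _: Arc<Pool>)
        -> Result<RpcExtension, String> {
        Ok(RpcExtension::new())
    }
}

/// Full parachain nodes merge the system, payment, state-migration and dev endpoints.
struct BuildParachainRpcExtensions;

impl BuildRpcExtensions<Client, Backend, Pool> for BuildParachainRpcExtensions {
    fn build_rpc_extensions(_client: Arc<Client>, _backend: Arc<Backend>, _pool: Arc<Pool>)
        -> Result<RpcExtension, String> {
        Ok(vec!["system", "transaction_payment", "state_migration", "dev"])
    }
}

fn main() {
    let (client, backend, pool) = (Arc::new(Client), Arc::new(Backend), Arc::new(Pool));
    let rpcs = BuildParachainRpcExtensions::build_rpc_extensions(client, backend, pool).unwrap();
    assert_eq!(rpcs.len(), 4);
}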
-use cumulus_client_cli::CollatorOptions; +use cumulus_client_cli::{CollatorOptions, ExportGenesisHeadCommand}; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::collators::{ lookahead::{self as aura, Params as AuraParams}, @@ -22,6 +22,7 @@ use cumulus_client_consensus_aura::collators::{ }; use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; +use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] use cumulus_client_service::old_consensus; use cumulus_client_service::{ @@ -30,39 +31,40 @@ use cumulus_client_service::{ }; use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; -use sc_rpc::DenyUnsafe; - -use jsonrpsee::RpcModule; use crate::{ common::{ aura::{AuraIdT, AuraRuntimeApi}, - ConstructNodeRuntimeApi, + ConstructNodeRuntimeApi, NodeExtraArgs, }, fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, - rpc, + rpc::BuildRpcExtensions, }; -pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Nonce}; +pub use parachains_common::{AccountId, Balance, Block, Hash, Nonce}; -use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; +use crate::rpc::{BuildEmptyRpcExtensions, BuildParachainRpcExtensions}; +use frame_benchmarking_cli::BlockCmd; +#[cfg(any(feature = "runtime-benchmarks"))] +use frame_benchmarking_cli::StorageCmd; use futures::prelude::*; +use polkadot_primitives::CollatorPair; use prometheus_endpoint::Registry; +use sc_cli::{CheckBlockCmd, ExportBlocksCmd, ExportStateCmd, ImportBlocksCmd, RevertCmd}; use sc_client_api::BlockchainEvents; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImportParams, ImportQueue, + BlockImportParams, DefaultImportQueue, ImportQueue, }; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock}; -use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; +use sc_service::{Configuration, Error, PartialComponents, TFullBackend, TFullClient, TaskManager}; +use sc_sysinfo::HwBench; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi}; -use sp_consensus_aura::AuraApi; +use sc_transaction_pool::FullPool; +use sp_api::ProvideRuntimeApi; use sp_keystore::KeystorePtr; use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT}; -use std::{marker::PhantomData, sync::Arc, time::Duration}; - -use polkadot_primitives::CollatorPair; +use std::{marker::PhantomData, pin::Pin, sync::Arc, time::Duration}; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = cumulus_client_service::ParachainHostFunctions; @@ -73,9 +75,9 @@ type HostFunctions = ( frame_benchmarking::benchmarking::HostFunctions, ); -type ParachainClient = TFullClient>; +pub type ParachainClient = TFullClient>; -type ParachainBackend = TFullBackend; +pub type ParachainBackend = TFullBackend; type ParachainBlockImport = TParachainBlockImport>, ParachainBackend>; @@ -90,413 +92,312 @@ pub type Service = PartialComponents< (ParachainBlockImport, Option, Option), >; -/// Starts a `ServiceBuilder` for a full service. 
-/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -pub fn new_partial( - config: &Configuration, - build_import_queue: BIQ, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructNodeRuntimeApi>, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, -{ - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let heap_pages = config - .default_heap_pages - .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - - let executor = sc_executor::WasmExecutor::::builder() - .with_execution_method(config.wasm_method) - .with_max_runtime_instances(config.max_runtime_instances) - .with_runtime_cache_size(config.runtime_cache_size) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_record_import::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - true, - )?; - let client = Arc::new(client); - - let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", None, worker.run()); - telemetry - }); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - let import_queue = build_import_queue( - client.clone(), - block_import.clone(), - config, - telemetry.as_ref().map(|telemetry| telemetry.handle()), - &task_manager, - )?; - - Ok(PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), - }) +pub(crate) trait BuildImportQueue { + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result>; } -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. 
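The hunk above removes the free-standing `new_partial` that took a `build_import_queue` closure and introduces the `BuildImportQueue` hook, so the queue constructor becomes an associated type of the node spec. A self-contained sketch of how a generic caller consumes such a hook follows; every type here is a simplified stand-in, not the real service types.

// Editor sketch (illustration only, not part of the diff).
struct Client;
struct ImportQueue {
    verifier: &'static str,
}

/// The import-queue constructor as a type-level hook.
trait BuildImportQueue {
    fn build_import_queue(client: &Client) -> Result<ImportQueue, String>;
}

/// Shell-style chains only verify relay-chain inclusion.
struct BuildShellImportQueue;
impl BuildImportQueue for BuildShellImportQueue {
    fn build_import_queue(_client: &Client) -> Result<ImportQueue, String> {
        Ok(ImportQueue { verifier: "relay-chain" })
    }
}

/// Aura-style chains verify slot authorship (the AuraId generic is elided here).
struct BuildAuraImportQueue;
impl BuildImportQueue for BuildAuraImportQueue {
    fn build_import_queue(_client: &Client) -> Result<ImportQueue, String> {
        Ok(ImportQueue { verifier: "aura" })
    }
}

/// A `new_partial`-like caller picks the queue purely through its type parameter,
/// which is what lets the real code move the choice into the node spec trait.
fn new_partial<BIQ: BuildImportQueue>(client: &Client) -> Result<ImportQueue, String> {
    BIQ::build_import_queue(client)
}

fn main() {
    let client = Client;
    let queue = new_partial::<BuildAuraImportQueue>(&client).unwrap();
    println!("import queue uses the {} verifier", queue.verifier);
}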
-#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, - para_id: ParaId, - rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +pub(crate) trait StartConsensus where RuntimeApi: ConstructNodeRuntimeApi>, - RB: Fn( - DenyUnsafe, - Arc>, - Arc, - Arc>>, - ) -> Result, sc_service::Error> - + 'static, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, - Net: NetworkBackend, { - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - let backend_for_rpc = backend.clone(); - - Box::new(move |deny_unsafe, _| { - rpc_ext_builder( - deny_unsafe, - client.clone(), - backend_for_rpc.clone(), - transaction_pool.clone(), - ) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = 
sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), sc_service::Error>; +} - let relay_chain_slot_duration = Duration::from_secs(6); +pub(crate) trait NodeSpec { + type RuntimeApi: ConstructNodeRuntimeApi>; + + type BuildImportQueue: BuildImportQueue + 'static; + + type BuildRpcExtensions: BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > + 'static; + + type StartConsensus: StartConsensus + 'static; + + const SYBIL_RESISTANCE: CollatorSybilResistance; + + /// Starts a `ServiceBuilder` for a full service. + /// + /// Use this macro if you don't actually need the full service, but just the builder in order to + /// be able to perform chain operations. + fn new_partial(config: &Configuration) -> sc_service::error::Result> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let heap_pages = config.default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| { + HeapAllocStrategy::Static { extra_pages: h as _ } + }); + + let executor = sc_executor::WasmExecutor::::builder() + .with_execution_method(config.wasm_method) + .with_max_runtime_instances(config.max_runtime_instances) + .with_runtime_cache_size(config.runtime_cache_size) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts_record_import::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + true, + )?; + let client = Arc::new(client); + + let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( + let import_queue = 
Self::BuildImportQueue::build_import_queue( client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), + block_import.clone(), + config, + telemetry.as_ref().map(|telemetry| telemetry.handle()), &task_manager, - relay_chain_interface.clone(), - transaction_pool, - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), )?; + + Ok(PartialComponents { + backend, + client, + import_queue, + keystore_container, + task_manager, + transaction_pool, + select_chain: (), + other: (block_import, telemetry, telemetry_worker_handle), + }) } - start_network.start_network(); + /// Start a node with the given parachain spec. + /// + /// This is the actual implementation that is abstract over the executor and the runtime api. + fn start_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>> + where + Net: NetworkBackend, + { + Box::pin(async move { + let parachain_config = prepare_node_config(parachain_config); + + let params = Self::new_partial(¶chain_config)?; + let (block_import, mut telemetry, telemetry_worker_handle) = params.other; + + let client = params.client.clone(); + let backend = params.backend.clone(); + + let mut task_manager = params.task_manager; + let (relay_chain_interface, collator_key) = build_relay_chain_interface( + polkadot_config, + ¶chain_config, + telemetry_worker_handle, + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + let validator = parachain_config.role.is_authority(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); + let transaction_pool = params.transaction_pool.clone(); + let import_queue_service = params.import_queue.service(); + let net_config = FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); + + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + build_network(BuildNetworkParams { + parachain_config: ¶chain_config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + para_id, + spawn_handle: task_manager.spawn_handle(), + relay_chain_interface: relay_chain_interface.clone(), + import_queue: params.import_queue, + sybil_resistance_level: Self::SYBIL_RESISTANCE, + }) + .await?; + + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |deny_unsafe, _| { + Self::BuildRpcExtensions::build_rpc_extensions( + deny_unsafe, + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_builder, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + config: parachain_config, + keystore: params.keystore_container.keystore(), + backend: backend.clone(), + network: network.clone(), + sync_service: sync_service.clone(), + system_rpc_tx, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + if validator { + warn_if_slow_hardware(&hwbench); + } - Ok((task_manager, client)) -} + if let Some(ref mut telemetry) = telemetry { + let 
telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } -/// Build the import queue for Aura-based runtimes. -pub fn build_aura_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) -} + let announce_block = { + let sync_service = sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + + let relay_chain_slot_duration = Duration::from_secs(6); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + start_relay_chain_tasks(StartRelayChainTasksParams { + client: client.clone(), + announce_block: announce_block.clone(), + para_id, + relay_chain_interface: relay_chain_interface.clone(), + task_manager: &mut task_manager, + da_recovery_profile: if validator { + DARecoveryProfile::Collator + } else { + DARecoveryProfile::FullNode + }, + import_queue: import_queue_service, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service, + })?; + + if validator { + Self::StartConsensus::start_consensus( + client.clone(), + block_import, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + &task_manager, + relay_chain_interface.clone(), + transaction_pool, + params.keystore_container.keystore(), + relay_chain_slot_duration, + para_id, + collator_key.expect("Command line arguments do not allow this. qed"), + overseer_handle, + announce_block, + backend.clone(), + )?; + } -/// Start a rococo parachain node. -pub async fn start_rococo_parachain_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - use_experimental_slot_based: bool, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - let consensus_starter = if use_experimental_slot_based { - start_slot_based_aura_consensus::<_, AuraId> - } else { - start_lookahead_aura_consensus::<_, AuraId> - }; - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_aura_import_queue, - consensus_starter, - hwbench, - ) - .await -} + start_network.start_network(); -/// Build the import queue for the shell runtime. 
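The `NodeSpec` trait above gathers every per-runtime choice as an associated item and ships a shared default `start_node`/`new_partial` flow; implementors such as the `ShellNode` added further down only pick types and a constant. A compressed, standalone analogue of that composition is sketched below; all names and types are simplified stand-ins, and the real start flow additionally wires networking, telemetry and relay-chain tasks.

// Editor sketch (illustration only, not part of the diff).
struct Client;
struct TaskManager;

trait BuildImportQueue { fn build(client: &Client) -> String; }
trait BuildRpcExtensions { fn build(client: &Client) -> Vec<&'static str>; }
trait StartConsensus { fn start(client: &Client); }

#[derive(Debug)]
enum CollatorSybilResistance { Resistant, Unresistant }

/// The node spec bundles the per-runtime hooks; `start_node` is the shared path.
trait NodeSpec {
    type BuildImportQueue: BuildImportQueue;
    type BuildRpcExtensions: BuildRpcExtensions;
    type StartConsensus: StartConsensus;
    const SYBIL_RESISTANCE: CollatorSybilResistance;

    fn start_node(client: Client) -> TaskManager {
        let _queue = Self::BuildImportQueue::build(&client);
        let _rpcs = Self::BuildRpcExtensions::build(&client);
        println!("sybil resistance: {:?}", Self::SYBIL_RESISTANCE);
        Self::StartConsensus::start(&client);
        TaskManager
    }
}

// One concrete spec: a shell-style node that simply follows the relay chain.
struct ShellImportQueue;
impl BuildImportQueue for ShellImportQueue { fn build(_: &Client) -> String { "relay-chain queue".into() } }
struct EmptyRpcs;
impl BuildRpcExtensions for EmptyRpcs { fn build(_: &Client) -> Vec<&'static str> { Vec::new() } }
struct RelayChainConsensus;
impl StartConsensus for RelayChainConsensus { fn start(_: &Client) { println!("following the relay chain"); } }

struct ShellStyleNode;
impl NodeSpec for ShellStyleNode {
    type BuildImportQueue = ShellImportQueue;
    type BuildRpcExtensions = EmptyRpcs;
    type StartConsensus = RelayChainConsensus;
    const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant;
}

fn main() {
    let _task_manager = ShellStyleNode::start_node(Client);
}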
-pub fn build_shell_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - _: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - cumulus_client_consensus_relay_chain::import_queue( - client, - block_import, - |_, _| async { Ok(()) }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ) - .map_err(Into::into) + Ok(task_manager) + }) + } } -fn build_parachain_rpc_extensions( - deny_unsafe: sc_rpc::DenyUnsafe, - client: Arc>, - backend: Arc, - pool: Arc>>, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_block_builder::BlockBuilder - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi, -{ - let deps = rpc::FullDeps { client, pool, deny_unsafe }; - - rpc::create_full(deps, backend).map_err(Into::into) +/// Build the import queue for the shell runtime. +pub(crate) struct BuildShellImportQueue(PhantomData); + +impl BuildImportQueue for BuildShellImportQueue { + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + _telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + cumulus_client_consensus_relay_chain::import_queue( + client, + block_import, + |_, _| async { Ok(()) }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ) + .map_err(Into::into) + } } -fn build_contracts_rpc_extensions( - deny_unsafe: sc_rpc::DenyUnsafe, - client: Arc>, - _backend: Arc, - pool: Arc>>, -) -> Result, sc_service::Error> { - let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; +pub(crate) struct ShellNode; - crate::rpc::create_contracts_rococo(deps).map_err(Into::into) -} +impl NodeSpec for ShellNode { + type RuntimeApi = FakeRuntimeApi; + type BuildImportQueue = BuildShellImportQueue; + type BuildRpcExtensions = BuildEmptyRpcExtensions; + type StartConsensus = StartRelayChainConsensus; -/// Start a polkadot-shell parachain node. -pub async fn start_shell_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Unresistant, // free-for-all consensus - para_id, - |_, _, _, _| Ok(RpcModule::new(())), - build_shell_import_queue, - start_relay_chain_consensus, - hwbench, - ) - .await + const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant; } struct Verifier { @@ -527,435 +428,374 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. 
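The relay-to-aura import queue exists because chains like Asset Hub launched on a shell runtime and only later upgraded to Aura, so verification has to pick a verifier per block. The sketch below illustrates that per-block delegation in isolation; the runtime probe is faked with an upgrade height, whereas the real verifier asks the client whether the runtime at that block exposes `AuraApi`.

// Editor sketch (illustration only, not part of the diff).
struct Block {
    number: u64,
}

trait Verify {
    fn verify(&self, block: &Block) -> Result<(), String>;
}

struct AuraVerifier;
impl Verify for AuraVerifier {
    fn verify(&self, block: &Block) -> Result<(), String> {
        println!("block {}: checking Aura slot claim", block.number);
        Ok(())
    }
}

struct RelayChainVerifier;
impl Verify for RelayChainVerifier {
    fn verify(&self, block: &Block) -> Result<(), String> {
        println!("block {}: relay-chain inclusion only", block.number);
        Ok(())
    }
}

/// Delegating verifier: shell-era blocks go to the relay-chain verifier,
/// post-upgrade blocks to the Aura verifier.
struct RelayToAuraVerifier {
    aura_from_block: u64,
    aura: AuraVerifier,
    relay: RelayChainVerifier,
}

impl Verify for RelayToAuraVerifier {
    fn verify(&self, block: &Block) -> Result<(), String> {
        if block.number >= self.aura_from_block {
            self.aura.verify(block)
        } else {
            self.relay.verify(block)
        }
    }
}

fn main() {
    let verifier = RelayToAuraVerifier { aura_from_block: 100, aura: AuraVerifier, relay: RelayChainVerifier };
    verifier.verify(&Block { number: 5 }).unwrap();
    verifier.verify(&Block { number: 250 }).unwrap();
}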
-pub fn build_relay_to_aura_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry_handle: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> +pub(crate) struct BuildRelayToAuraImportQueue( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl BuildImportQueue + for BuildRelayToAuraImportQueue where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, AuraId: AuraIdT + Sync, { - let verifier_client = client.clone(); - - let aura_verifier = cumulus_client_consensus_aura::build_verifier::< - ::Pair, - _, - _, - _, - >(cumulus_client_consensus_aura::BuildVerifierParams { - client: verifier_client.clone(), - create_inherent_data_providers: move |parent_hash, _| { - let cidp_client = verifier_client.clone(); - async move { - let slot_duration = - cumulus_client_consensus_aura::slot_duration_at(&*cidp_client, parent_hash)?; - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + let verifier_client = client.clone(); + + let aura_verifier = + cumulus_client_consensus_aura::build_verifier::<::Pair, _, _, _>( + cumulus_client_consensus_aura::BuildVerifierParams { + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + } + }, + telemetry: telemetry_handle, + }, + ); - Ok((slot, timestamp)) - } - }, - telemetry: telemetry_handle, - }); - - let relay_chain_verifier = - Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; - - let verifier = Verifier { - client, - relay_chain_verifier, - aura_verifier: Box::new(aura_verifier), - _phantom: PhantomData, - }; + let relay_chain_verifier = + Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })); + + let verifier = Verifier { + client, + relay_chain_verifier, + aura_verifier: Box::new(aura_verifier), + _phantom: PhantomData, + }; - let registry = config.prometheus_registry(); - let spawner = task_manager.spawn_essential_handle(); + let registry = config.prometheus_registry(); + let spawner = task_manager.spawn_essential_handle(); - Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) + Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) + } } /// Uses the lookahead collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. 
-pub async fn start_generic_aura_async_backing_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - use_experimental_slot_based: bool, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - let consensus_starter = if use_experimental_slot_based { - start_slot_based_aura_consensus::<_, AuraId> - } else { - start_lookahead_aura_consensus::<_, AuraId> - }; - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - consensus_starter, - hwbench, - ) - .await +pub(crate) struct AuraNode( + pub PhantomData<(RuntimeApi, AuraId, StartConsensus)>, +); + +impl Default for AuraNode { + fn default() -> Self { + Self(Default::default()) + } } -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. -/// -/// Uses the lookahead collator to support async backing. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -pub async fn start_asset_hub_async_backing_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - use_experimental_slot_based: bool, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +impl NodeSpec for AuraNode where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, - Net: NetworkBackend, + StartConsensus: self::StartConsensus + 'static, { - let consensus_starter = if use_experimental_slot_based { - start_slot_based_aura_consensus::<_, AuraId> - } else { - start_lookahead_aura_consensus::<_, AuraId> - }; - - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions, - build_relay_to_aura_import_queue::<_, AuraId>, - consensus_starter, - hwbench, - ) - .await + type RuntimeApi = RuntimeApi; + type BuildImportQueue = BuildRelayToAuraImportQueue; + type BuildRpcExtensions = BuildParachainRpcExtensions; + type StartConsensus = StartConsensus; + const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant; } -/// Wait for the Aura runtime API to appear on chain. -/// This is useful for chains that started out without Aura. Components that -/// are depending on Aura functionality will wait until Aura appears in the runtime. 
-async fn wait_for_aura(client: Arc>) +pub fn new_aura_node_spec(extra_args: NodeExtraArgs) -> Box where RuntimeApi: ConstructNodeRuntimeApi>, - RuntimeApi::RuntimeApi: AuraRuntimeApi, + RuntimeApi::RuntimeApi: AuraRuntimeApi + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, { - let finalized_hash = client.chain_info().finalized_hash; - if client - .runtime_api() - .has_api::>(finalized_hash) - .unwrap_or(false) - { - return; - }; - - let mut stream = client.finality_notification_stream(); - while let Some(notification) = stream.next().await { - let has_aura_api = client - .runtime_api() - .has_api::>(notification.hash) - .unwrap_or(false); - if has_aura_api { - return; - } + if extra_args.use_slot_based_consensus { + Box::new(AuraNode::< + RuntimeApi, + AuraId, + StartSlotBasedAuraConsensus, + >::default()) + } else { + Box::new(AuraNode::< + RuntimeApi, + AuraId, + StartLookaheadAuraConsensus, + >::default()) } } /// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain /// decides what is backed and included. -fn start_relay_chain_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - _keystore: KeystorePtr, - _relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - _backend: Arc, -) -> Result<(), sc_service::Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); - - let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( - cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { - para_id, - proposer_factory, - block_import, - relay_chain_interface: relay_chain_interface.clone(), - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = +pub(crate) struct StartRelayChainConsensus; + +impl StartConsensus for StartRelayChainConsensus { + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + _keystore: KeystorePtr, + _relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + _backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry, + ); + + let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( + cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { + para_id, + proposer_factory, + block_import, + relay_chain_interface: relay_chain_interface.clone(), + create_inherent_data_providers: move |_, (relay_parent, validation_data)| { + let relay_chain_interface = relay_chain_interface.clone(); + async move { + let parachain_inherent = 
cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( relay_parent, &relay_chain_interface, &validation_data, para_id, ).await; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok(parachain_inherent) - } + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + Ok(parachain_inherent) + } + }, }, - }, - ); - - let spawner = task_manager.spawn_handle(); - - // Required for free-for-all consensus - #[allow(deprecated)] - old_consensus::start_collator_sync(old_consensus::StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - key: collator_key, - parachain_consensus: free_for_all, - runtime_api: client.clone(), - }); - - Ok(()) + ); + + let spawner = task_manager.spawn_handle(); + + // Required for free-for-all consensus + #[allow(deprecated)] + old_consensus::start_collator_sync(old_consensus::StartCollatorParams { + para_id, + block_status: client.clone(), + announce_block, + overseer_handle, + spawner, + key: collator_key, + parachain_consensus: free_for_all, + runtime_api: client.clone(), + }); + + Ok(()) + } } /// Start consensus using the lookahead aura collator. -fn start_lookahead_aura_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - keystore: KeystorePtr, - relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - backend: Arc, -) -> Result<(), sc_service::Error> +pub(crate) struct StartSlotBasedAuraConsensus( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl StartConsensus + for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, AuraId: AuraIdT + Sync, { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface, - code_hash_provider: { - let client = client.clone(); - move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - } - }, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer: Proposer::new(proposer_factory), - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + _overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + 
task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); - let fut = async move { - wait_for_aura(client).await; - aura::run::::Pair, _, _, _, _, _, _, _, _>(params).await; - }; - task_manager.spawn_essential_handle().spawn("aura", None, fut); + let proposer = Proposer::new(proposer_factory); + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); - Ok(()) + let client_for_aura = client.clone(); + let params = SlotBasedParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + slot_drift: Duration::from_secs(1), + }; + + let (collation_future, block_builder_future) = + slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); + + task_manager.spawn_essential_handle().spawn( + "collation-task", + Some("parachain-block-authoring"), + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + Some("parachain-block-authoring"), + block_builder_future, + ); + Ok(()) + } } -/// Start consensus using the lookahead aura collator. -fn start_slot_based_aura_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - keystore: KeystorePtr, - relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - _overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - backend: Arc, -) -> Result<(), sc_service::Error> +/// Wait for the Aura runtime API to appear on chain. +/// This is useful for chains that started out without Aura. Components that +/// are depending on Aura functionality will wait until Aura appears in the runtime. 
+async fn wait_for_aura(client: Arc>) where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, AuraId: AuraIdT + Sync, { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let proposer = Proposer::new(proposer_factory); - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let client_for_aura = client.clone(); - let params = SlotBasedParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - keystore, - collator_key, - para_id, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(2000), - reinitialize: false, - slot_drift: Duration::from_secs(1), + let finalized_hash = client.chain_info().finalized_hash; + if client.runtime_api().has_aura_api(finalized_hash) { + return; }; - let (collation_future, block_builder_future) = - slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); - - task_manager.spawn_essential_handle().spawn( - "collation-task", - Some("parachain-block-authoring"), - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - Some("parachain-block-authoring"), - block_builder_future, - ); - Ok(()) + let mut stream = client.finality_notification_stream(); + while let Some(notification) = stream.next().await { + if client.runtime_api().has_aura_api(notification.hash) { + return; + } + } } -/// Start an aura powered parachain node which uses the lookahead collator to support async backing. -/// This node is basic in the sense that its runtime api doesn't include common contents such as -/// transaction payment. Used for aura glutton. -pub async fn start_basic_async_backing_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - use_experimental_slot_based: bool, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - let consensus_starter = if use_experimental_slot_based { - start_slot_based_aura_consensus::<_, AuraId> - } else { - start_lookahead_aura_consensus::<_, AuraId> - }; - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_, _, _, _| Ok(RpcModule::new(())), - build_relay_to_aura_import_queue::<_, AuraId>, - consensus_starter, - hwbench, - ) - .await -} +/// Start consensus using the lookahead aura collator. +pub(crate) struct StartLookaheadAuraConsensus( + PhantomData<(RuntimeApi, AuraId)>, +); -/// Start a parachain node for Rococo Contracts. 
-pub async fn start_contracts_rococo_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - use_experimental_slot_based: bool, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - let consensus_starter = if use_experimental_slot_based { - start_slot_based_aura_consensus::<_, AuraId> - } else { - start_lookahead_aura_consensus::<_, AuraId> - }; - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_contracts_rpc_extensions, - build_aura_import_queue, - consensus_starter, - hwbench, - ) - .await +impl StartConsensus + for StartLookaheadAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface, + code_hash_provider: { + let client = client.clone(); + move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + } + }, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer: Proposer::new(proposer_factory), + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: false, + }; + + let fut = async move { + wait_for_aura(client).await; + aura::run::::Pair, _, _, _, _, _, _, _, _>(params).await; + }; + task_manager.spawn_essential_handle().spawn("aura", None, fut); + + Ok(()) + } } /// Checks that the hardware meets the requirements and print a warning otherwise. 
@@ -970,3 +810,177 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { ); } } + +type SyncCmdResult = sc_cli::Result<()>; + +type AsyncCmdResult<'a> = + sc_cli::Result<(Pin + 'a>>, TaskManager)>; + +pub(crate) trait DynNodeSpec { + fn prepare_check_block_cmd( + self: Box, + config: Configuration, + cmd: &CheckBlockCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_export_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ExportBlocksCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_export_state_cmd( + self: Box, + config: Configuration, + cmd: &ExportStateCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_import_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ImportBlocksCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_revert_cmd( + self: Box, + config: Configuration, + cmd: &RevertCmd, + ) -> AsyncCmdResult<'_>; + + fn run_export_genesis_head_cmd( + self: Box, + config: Configuration, + cmd: &ExportGenesisHeadCommand, + ) -> SyncCmdResult; + + fn run_benchmark_block_cmd( + self: Box, + config: Configuration, + cmd: &BlockCmd, + ) -> SyncCmdResult; + + #[cfg(any(feature = "runtime-benchmarks"))] + fn run_benchmark_storage_cmd( + self: Box, + config: Configuration, + cmd: &StorageCmd, + ) -> SyncCmdResult; + + fn start_node( + self: Box, + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>>; +} + +impl DynNodeSpec for T +where + T: NodeSpec, +{ + fn prepare_check_block_cmd( + self: Box, + config: Configuration, + cmd: &CheckBlockCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager)) + } + + fn prepare_export_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ExportBlocksCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, config.database)), partial.task_manager)) + } + + fn prepare_export_state_cmd( + self: Box, + config: Configuration, + cmd: &ExportStateCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, config.chain_spec)), partial.task_manager)) + } + + fn prepare_import_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ImportBlocksCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager)) + } + + fn prepare_revert_cmd( + self: Box, + config: Configuration, + cmd: &RevertCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.backend, None)), partial.task_manager)) + } + + fn run_export_genesis_head_cmd( + self: Box, + config: Configuration, + cmd: &ExportGenesisHeadCommand, + ) -> SyncCmdResult { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + cmd.run(partial.client) + } + + fn run_benchmark_block_cmd( + self: Box, + config: Configuration, + cmd: &BlockCmd, + ) -> SyncCmdResult { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + cmd.run(partial.client) + } + + #[cfg(any(feature = "runtime-benchmarks"))] + fn run_benchmark_storage_cmd( + self: Box, + config: Configuration, + cmd: &StorageCmd, + ) -> SyncCmdResult { + let partial = 
Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + let db = partial.backend.expose_db(); + let storage = partial.backend.expose_storage(); + + cmd.run(config, partial.client, db, storage) + } + + fn start_node( + self: Box, + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>> { + match parachain_config.network.network_backend { + sc_network::config::NetworkBackendType::Libp2p => + ::start_node::>( + parachain_config, + polkadot_config, + collator_options, + para_id, + hwbench, + ), + sc_network::config::NetworkBackendType::Litep2p => + ::start_node::( + parachain_config, + polkadot_config, + collator_options, + para_id, + hwbench, + ), + } + } +} diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index b89054b4dc321..62d99122c3012 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -192,7 +192,7 @@ where F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), { let runner = cli - .create_runner_with_logger_hook::(&cli.run.base, logger_hook) + .create_runner_with_logger_hook::<_, _, F>(&cli.run.base, logger_hook) .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index ecf79eac2883e..660b504e97fbb 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.13.0"; +pub const NODE_VERSION: &'static str = "1.14.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 015e433382c8d..ef629c7dad155 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -163,7 +163,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index ca58a6390109d..0d7a8a6a4ac22 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -159,7 +159,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_013_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index 0a33d52580fca..09ead1389d19d 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -233,6 +233,32 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result = data_enum + .variants + .iter() + .filter(|variant| variant.ident == "ClearOrigin") + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = 
get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => { + quote! { + #(#docs)* + pub fn #method_name(mut self) -> XcmBuilder { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + _ => return Err(Error::new_spanned(variant, "ClearOrigin should have no fields")), + }; + Ok(method) + }) + .collect::, _>>()?; + // Then we require fees to be paid let buy_execution_method = data_enum .variants @@ -276,6 +302,7 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result XcmBuilder { + #(#allowed_after_load_holding_methods)* #buy_execution_method } }; diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs index 96b16fb7e4565..4202309bf3f71 100644 --- a/polkadot/xcm/procedural/tests/builder_pattern.rs +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -79,3 +79,24 @@ fn default_builder_requires_buy_execution() { ]) ); } + +#[test] +fn default_builder_allows_clear_origin_before_buy_execution() { + let asset: Asset = (Here, 100u128).into(); + let beneficiary: Location = [0u8; 32].into(); + let message: Xcm<()> = Xcm::builder() + .receive_teleported_asset(asset.clone()) + .clear_origin() + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone(), beneficiary.clone()) + .build(); + assert_eq!( + message, + Xcm(vec![ + ReceiveTeleportedAsset(asset.clone().into()), + ClearOrigin, + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); +} diff --git a/prdoc/pr_1631.prdoc b/prdoc/1.14.0/pr_1631.prdoc similarity index 100% rename from prdoc/pr_1631.prdoc rename to prdoc/1.14.0/pr_1631.prdoc diff --git a/prdoc/pr_3374.prdoc b/prdoc/1.14.0/pr_3374.prdoc similarity index 100% rename from prdoc/pr_3374.prdoc rename to prdoc/1.14.0/pr_3374.prdoc diff --git a/prdoc/pr_3679.prdoc b/prdoc/1.14.0/pr_3679.prdoc similarity index 100% rename from prdoc/pr_3679.prdoc rename to prdoc/1.14.0/pr_3679.prdoc diff --git a/prdoc/pr_3820.prdoc b/prdoc/1.14.0/pr_3820.prdoc similarity index 100% rename from prdoc/pr_3820.prdoc rename to prdoc/1.14.0/pr_3820.prdoc diff --git a/prdoc/pr_3828.prdoc b/prdoc/1.14.0/pr_3828.prdoc similarity index 100% rename from prdoc/pr_3828.prdoc rename to prdoc/1.14.0/pr_3828.prdoc diff --git a/prdoc/pr_3843.prdoc b/prdoc/1.14.0/pr_3843.prdoc similarity index 100% rename from prdoc/pr_3843.prdoc rename to prdoc/1.14.0/pr_3843.prdoc diff --git a/prdoc/pr_3940.prdoc b/prdoc/1.14.0/pr_3940.prdoc similarity index 100% rename from prdoc/pr_3940.prdoc rename to prdoc/1.14.0/pr_3940.prdoc diff --git a/prdoc/pr_3951.prdoc b/prdoc/1.14.0/pr_3951.prdoc similarity index 100% rename from prdoc/pr_3951.prdoc rename to prdoc/1.14.0/pr_3951.prdoc diff --git a/prdoc/pr_4513.prdoc b/prdoc/1.14.0/pr_4513.prdoc similarity index 100% rename from prdoc/pr_4513.prdoc rename to prdoc/1.14.0/pr_4513.prdoc diff --git a/prdoc/pr_4596.prdoc b/prdoc/1.14.0/pr_4596.prdoc similarity index 100% rename from prdoc/pr_4596.prdoc rename to prdoc/1.14.0/pr_4596.prdoc diff --git a/prdoc/pr_4618.prdoc b/prdoc/1.14.0/pr_4618.prdoc similarity index 100% rename from prdoc/pr_4618.prdoc rename to prdoc/1.14.0/pr_4618.prdoc diff --git a/prdoc/pr_4662.prdoc b/prdoc/1.14.0/pr_4662.prdoc similarity index 100% rename from prdoc/pr_4662.prdoc rename to prdoc/1.14.0/pr_4662.prdoc diff --git a/prdoc/pr_4684.prdoc b/prdoc/1.14.0/pr_4684.prdoc similarity index 100% rename from prdoc/pr_4684.prdoc rename to prdoc/1.14.0/pr_4684.prdoc 
diff --git a/prdoc/pr_4685.prdoc b/prdoc/1.14.0/pr_4685.prdoc similarity index 100% rename from prdoc/pr_4685.prdoc rename to prdoc/1.14.0/pr_4685.prdoc diff --git a/prdoc/pr_4691.prdoc b/prdoc/1.14.0/pr_4691.prdoc similarity index 100% rename from prdoc/pr_4691.prdoc rename to prdoc/1.14.0/pr_4691.prdoc diff --git a/prdoc/pr_4710.prdoc b/prdoc/1.14.0/pr_4710.prdoc similarity index 100% rename from prdoc/pr_4710.prdoc rename to prdoc/1.14.0/pr_4710.prdoc diff --git a/prdoc/pr_4724.prdoc b/prdoc/1.14.0/pr_4724.prdoc similarity index 100% rename from prdoc/pr_4724.prdoc rename to prdoc/1.14.0/pr_4724.prdoc diff --git a/prdoc/pr_4728.prdoc b/prdoc/1.14.0/pr_4728.prdoc similarity index 100% rename from prdoc/pr_4728.prdoc rename to prdoc/1.14.0/pr_4728.prdoc diff --git a/prdoc/pr_4730.prdoc b/prdoc/1.14.0/pr_4730.prdoc similarity index 100% rename from prdoc/pr_4730.prdoc rename to prdoc/1.14.0/pr_4730.prdoc diff --git a/prdoc/pr_4733.prdoc b/prdoc/1.14.0/pr_4733.prdoc similarity index 100% rename from prdoc/pr_4733.prdoc rename to prdoc/1.14.0/pr_4733.prdoc diff --git a/prdoc/pr_4756.prdoc b/prdoc/1.14.0/pr_4756.prdoc similarity index 100% rename from prdoc/pr_4756.prdoc rename to prdoc/1.14.0/pr_4756.prdoc diff --git a/prdoc/pr_4757.prdoc b/prdoc/1.14.0/pr_4757.prdoc similarity index 100% rename from prdoc/pr_4757.prdoc rename to prdoc/1.14.0/pr_4757.prdoc diff --git a/prdoc/pr_4765.prdoc b/prdoc/1.14.0/pr_4765.prdoc similarity index 100% rename from prdoc/pr_4765.prdoc rename to prdoc/1.14.0/pr_4765.prdoc diff --git a/prdoc/pr_4769.prdoc b/prdoc/1.14.0/pr_4769.prdoc similarity index 100% rename from prdoc/pr_4769.prdoc rename to prdoc/1.14.0/pr_4769.prdoc diff --git a/prdoc/pr_4799.prdoc b/prdoc/1.14.0/pr_4799.prdoc similarity index 100% rename from prdoc/pr_4799.prdoc rename to prdoc/1.14.0/pr_4799.prdoc diff --git a/prdoc/pr_4802.prdoc b/prdoc/1.14.0/pr_4802.prdoc similarity index 100% rename from prdoc/pr_4802.prdoc rename to prdoc/1.14.0/pr_4802.prdoc diff --git a/prdoc/pr_4807.prdoc b/prdoc/1.14.0/pr_4807.prdoc similarity index 100% rename from prdoc/pr_4807.prdoc rename to prdoc/1.14.0/pr_4807.prdoc diff --git a/prdoc/pr_4823.prdoc b/prdoc/1.14.0/pr_4823.prdoc similarity index 100% rename from prdoc/pr_4823.prdoc rename to prdoc/1.14.0/pr_4823.prdoc diff --git a/prdoc/pr_4831.prdoc b/prdoc/1.14.0/pr_4831.prdoc similarity index 100% rename from prdoc/pr_4831.prdoc rename to prdoc/1.14.0/pr_4831.prdoc diff --git a/prdoc/pr_4833.prdoc b/prdoc/1.14.0/pr_4833.prdoc similarity index 100% rename from prdoc/pr_4833.prdoc rename to prdoc/1.14.0/pr_4833.prdoc diff --git a/prdoc/pr_4844.prdoc b/prdoc/1.14.0/pr_4844.prdoc similarity index 100% rename from prdoc/pr_4844.prdoc rename to prdoc/1.14.0/pr_4844.prdoc diff --git a/prdoc/pr_4857.prdoc b/prdoc/1.14.0/pr_4857.prdoc similarity index 100% rename from prdoc/pr_4857.prdoc rename to prdoc/1.14.0/pr_4857.prdoc diff --git a/prdoc/pr_4865.prdoc b/prdoc/1.14.0/pr_4865.prdoc similarity index 100% rename from prdoc/pr_4865.prdoc rename to prdoc/1.14.0/pr_4865.prdoc diff --git a/prdoc/pr_4877.prdoc b/prdoc/1.14.0/pr_4877.prdoc similarity index 100% rename from prdoc/pr_4877.prdoc rename to prdoc/1.14.0/pr_4877.prdoc diff --git a/prdoc/pr_3286.prdoc b/prdoc/pr_3286.prdoc new file mode 100644 index 0000000000000..6ec3f6552a4a7 --- /dev/null +++ b/prdoc/pr_3286.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Assets: 
can_decrease/increase for destroying asset is not successful" + +doc: + - audience: Runtime Dev + description: | + Functions `can_decrease` and `can_increase` do not return successful consequence results + for assets undergoing destruction; instead, they return the `UnknownAsset` consequence variant. + This update aligns their behavior with similar functions, such as `reducible_balance`, + `increase_balance`, `decrease_balance`, and `burn`, which return an `AssetNotLive` error + for assets in the process of being destroyed. + +crates: + - name: pallet-assets diff --git a/prdoc/pr_4777.prdoc b/prdoc/pr_4777.prdoc new file mode 100644 index 0000000000000..07fa8decebe08 --- /dev/null +++ b/prdoc/pr_4777.prdoc @@ -0,0 +1,27 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCM builder pattern allows clear_origin before buy_execution. + +doc: + - audience: Runtime Dev + description: | + Added clear_origin as an allowed command after commands that load the holding register, in the safe xcm builder. + Previously, although it's logically allowed, an XCM could not be built like this: + ```rust + let xcm = Xcm::builder() + .withdraw_asset((Parent, 100u128)) + .clear_origin() + .buy_execution((Parent, 1u128)) + .deposit_asset(All, [0u8; 32]) + .build(); + ``` + You had to use the unsafe_builder. + Now, it's allowed using the default builder. + +crates: +- name: "xcm-procedural" + bump: minor +- name: "staging-xcm" + bump: minor + diff --git a/prdoc/pr_4863.prdoc b/prdoc/pr_4863.prdoc new file mode 100644 index 0000000000000..eb43b67a45c5c --- /dev/null +++ b/prdoc/pr_4863.prdoc @@ -0,0 +1,10 @@ +title: "Make `tracing::log` work in the runtime" + +doc: + - audience: Runtime Dev + description: | + Make `tracing::log` work in the runtime as `log` works in the runtime. + +crates: + - name: sp-runtime + bump: patch diff --git a/prdoc/pr_4885.prdoc b/prdoc/pr_4885.prdoc new file mode 100644 index 0000000000000..50dc31bc1b8fa --- /dev/null +++ b/prdoc/pr_4885.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-transaction-storage + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-transaction-storage`'s storage items. + When accessed inside the pallet, use the syntax `StorageItem::<T>::get()`. + +crates: + - name: pallet-transaction-storage + bump: minor diff --git a/prdoc/pr_4912.prdoc b/prdoc/pr_4912.prdoc new file mode 100644 index 0000000000000..dd96054b81fa3 --- /dev/null +++ b/prdoc/pr_4912.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-babe + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-babe`'s storage items. + When accessed inside the pallet, use the syntax `StorageItem::<T>::get()`. + When accessed outside the pallet, use the public functions of storage.
+ +crates: + - name: pallet-babe + bump: minor diff --git a/prdoc/pr_4943.prdoc b/prdoc/pr_4943.prdoc new file mode 100644 index 0000000000000..705325126060b --- /dev/null +++ b/prdoc/pr_4943.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Update definition of frozen balance (docs PR) + +doc: + - audience: Runtime Dev + description: | + This PR fixes a bug in the docs located in the definition of frozen balances. In addition, it extends that definition for completeness. + +crates: +- name: frame-support + bump: patch \ No newline at end of file diff --git a/substrate/client/cli/src/lib.rs b/substrate/client/cli/src/lib.rs index 104e8ec8b798e..1bb9fec0e2769 100644 --- a/substrate/client/cli/src/lib.rs +++ b/substrate/client/cli/src/lib.rs @@ -58,11 +58,11 @@ pub trait SubstrateCli: Sized { /// Implementation version. /// - /// By default this will look like this: + /// By default, it will look like this: /// /// `2.0.0-b950f731c` /// - /// Where the hash is the short commit hash of the commit of in the Git repository. + /// Where the hash is the short hash of the commit in the Git repository. fn impl_version() -> String; /// Executable file name. @@ -199,17 +199,8 @@ pub trait SubstrateCli: Sized { fn create_runner, DVC: DefaultConfigurationValues>( &self, command: &T, - ) -> error::Result> { - let tokio_runtime = build_runtime()?; - - // `capture` needs to be called in a tokio context. - // Also capture them as early as possible. - let signals = tokio_runtime.block_on(async { Signals::capture() })?; - - let config = command.create_configuration(self, tokio_runtime.handle().clone())?; - - command.init(&Self::support_url(), &Self::impl_version(), |_, _| {}, &config)?; - Runner::new(config, tokio_runtime, signals) + ) -> Result> { + self.create_runner_with_logger_hook(command, |_, _| {}) } /// Create a runner for the command provided in argument. The `logger_hook` can be used to setup @@ -231,11 +222,15 @@ pub trait SubstrateCli: Sized { /// } /// } /// ``` - fn create_runner_with_logger_hook( + fn create_runner_with_logger_hook< + T: CliConfiguration, + DVC: DefaultConfigurationValues, + F, + >( &self, command: &T, logger_hook: F, - ) -> error::Result> + ) -> Result> where F: FnOnce(&mut LoggerBuilder, &Configuration), { diff --git a/substrate/client/network/src/litep2p/peerstore.rs b/substrate/client/network/src/litep2p/peerstore.rs index 3f7155edbc923..347aa0b90eed5 100644 --- a/substrate/client/network/src/litep2p/peerstore.rs +++ b/substrate/client/network/src/litep2p/peerstore.rs @@ -85,6 +85,11 @@ impl PeerInfo { self.reputation < BANNED_THRESHOLD } + fn add_reputation(&mut self, increment: i32) { + self.reputation = self.reputation.saturating_add(increment); + self.bump_last_updated(); + } + fn decay_reputation(&mut self, seconds_passed: u64) { // Note that decaying the reputation value happens "on its own", // so we don't do `bump_last_updated()`. 
@@ -103,6 +108,10 @@ impl PeerInfo { } } } + + fn bump_last_updated(&mut self) { + self.last_updated = Instant::now(); + } } #[derive(Debug, Default)] @@ -169,7 +178,7 @@ impl PeerStoreProvider for PeerstoreHandle { match lock.peers.get_mut(&peer) { Some(info) => { - info.reputation = info.reputation.saturating_add(reputation_change.value); + info.add_reputation(reputation_change.value); }, None => { lock.peers.insert( diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index a51bb4012d5d8..63be296d1b216 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -137,7 +137,7 @@ pub struct PartialComponents, /// The chain task manager. pub task_manager: TaskManager, - /// A keystore container instance.. + /// A keystore container instance. pub keystore_container: KeystoreContainer, /// A chain selection algorithm instance. pub select_chain: SelectChain, diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index 2fb8aee1a970c..1e4a9522759c3 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -132,6 +132,9 @@ impl, I: 'static> Pallet { Some(details) => details, None => return DepositConsequence::UnknownAsset, }; + if details.status == AssetStatus::Destroying { + return DepositConsequence::UnknownAsset + } if increase_supply && details.supply.checked_add(&amount).is_none() { return DepositConsequence::Overflow } @@ -175,6 +178,9 @@ impl, I: 'static> Pallet { if details.status == AssetStatus::Frozen { return Frozen } + if details.status == AssetStatus::Destroying { + return UnknownAsset + } if amount.is_zero() { return Success } diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index 62106d47a1562..c751fbdcaf1bb 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -22,7 +22,11 @@ use crate::{mock::*, Error}; use frame_support::{ assert_noop, assert_ok, dispatch::GetDispatchInfo, - traits::{fungibles::InspectEnumerable, tokens::Preservation::Protect, Currency}, + traits::{ + fungibles::InspectEnumerable, + tokens::{Preservation::Protect, Provenance}, + Currency, + }, }; use pallet_balances::Error as BalancesError; use sp_io::storage; @@ -1778,6 +1782,35 @@ fn asset_destroy_refund_existence_deposit() { }); } +#[test] +fn increasing_or_decreasing_destroying_asset_should_not_work() { + new_test_ext().execute_with(|| { + use frame_support::traits::fungibles::Inspect; + + let admin = 1; + let admin_origin = RuntimeOrigin::signed(admin); + + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, admin, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + + assert_eq!(Assets::can_deposit(0, &1, 10, Provenance::Extant), DepositConsequence::Success); + assert_eq!(Assets::can_withdraw(0, &1, 10), WithdrawConsequence::<_>::Success); + assert_eq!(Assets::can_increase(0, &1, 10, false), DepositConsequence::Success); + assert_eq!(Assets::can_decrease(0, &1, 10, false), WithdrawConsequence::<_>::Success); + + assert_ok!(Assets::start_destroy(admin_origin, 0)); + + assert_eq!( + Assets::can_deposit(0, &1, 10, Provenance::Extant), + DepositConsequence::UnknownAsset + ); + assert_eq!(Assets::can_withdraw(0, &1, 10), WithdrawConsequence::<_>::UnknownAsset); + assert_eq!(Assets::can_increase(0, &1, 10, false), DepositConsequence::UnknownAsset); + assert_eq!(Assets::can_decrease(0, &1, 10, false), 
WithdrawConsequence::<_>::UnknownAsset); + }); +} + #[test] fn asset_id_cannot_be_reused() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 686ba6ec2d634..9e16f1d095156 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -96,11 +96,11 @@ pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { fn trigger(now: BlockNumberFor) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); + if Pallet::::should_epoch_change(now) { + let authorities = Authorities::::get(); let next_authorities = authorities.clone(); - >::enact_epoch_change(authorities, next_authorities, None); + Pallet::::enact_epoch_change(authorities, next_authorities, None); } } } @@ -185,12 +185,10 @@ pub mod pallet { /// Current epoch index. #[pallet::storage] - #[pallet::getter(fn epoch_index)] pub type EpochIndex = StorageValue<_, u64, ValueQuery>; /// Current epoch authorities. #[pallet::storage] - #[pallet::getter(fn authorities)] pub type Authorities = StorageValue< _, WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, @@ -200,12 +198,10 @@ pub mod pallet { /// The slot at which the first epoch actually started. This is 0 /// until the first block of the chain. #[pallet::storage] - #[pallet::getter(fn genesis_slot)] pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; /// Current slot number. #[pallet::storage] - #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; /// The epoch randomness for the *current* epoch. @@ -222,20 +218,19 @@ pub mod pallet { // array size because the metadata API currently doesn't resolve the // variable to its underlying value. #[pallet::storage] - #[pallet::getter(fn randomness)] pub type Randomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Pending epoch configuration change that will be applied when the next epoch is enacted. #[pallet::storage] - pub(super) type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; + pub type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; /// Next epoch randomness. #[pallet::storage] - pub(super) type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; + pub type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Next epoch authorities. #[pallet::storage] - pub(super) type NextAuthorities = StorageValue< + pub type NextAuthorities = StorageValue< _, WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, ValueQuery, @@ -251,11 +246,11 @@ pub mod pallet { /// We reset all segments and return to `0` at the beginning of every /// epoch. #[pallet::storage] - pub(super) type SegmentIndex = StorageValue<_, u32, ValueQuery>; + pub type SegmentIndex = StorageValue<_, u32, ValueQuery>; /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. #[pallet::storage] - pub(super) type UnderConstruction = StorageMap< + pub type UnderConstruction = StorageMap< _, Twox64Concat, u32, @@ -266,16 +261,14 @@ pub mod pallet { /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. #[pallet::storage] - #[pallet::getter(fn initialized)] - pub(super) type Initialized = StorageValue<_, Option>; + pub type Initialized = StorageValue<_, Option>; /// This field should always be populated during block processing unless /// secondary plain slots are enabled (which don't contain a VRF output). 
/// /// It is set in `on_finalize`, before it will contain the value from the last block. #[pallet::storage] - #[pallet::getter(fn author_vrf_randomness)] - pub(super) type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; + pub type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; /// The block numbers when the last and current epoch have started, respectively `N-1` and /// `N`. @@ -292,19 +285,17 @@ pub mod pallet { /// on block finalization. Querying this storage entry outside of block /// execution context should always yield zero. #[pallet::storage] - #[pallet::getter(fn lateness)] - pub(super) type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. #[pallet::storage] - #[pallet::getter(fn epoch_config)] - pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; + pub type EpochConfig = StorageValue<_, BabeEpochConfiguration>; /// The configuration for the next epoch, `None` if the config will not change /// (you can fallback to `EpochConfig` instead in that case). #[pallet::storage] - pub(super) type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; + pub type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; /// A list of the last 100 skipped epochs and the corresponding session index /// when the epoch was skipped. @@ -315,8 +306,7 @@ pub mod pallet { /// a validator was the owner of a given key on a given session, and what the /// active epoch index was during that session. #[pallet::storage] - #[pallet::getter(fn skipped_epochs)] - pub(super) type SkippedEpochs = + pub type SkippedEpochs = StorageValue<_, BoundedVec<(u64, SessionIndex), ConstU32<100>>, ValueQuery>; #[derive(frame_support::DefaultNoBound)] @@ -368,7 +358,7 @@ pub mod pallet { .and_then(|(authority, _)| { let public = authority.as_inner_ref(); let transcript = sp_consensus_babe::make_vrf_transcript( - &Self::randomness(), + &Randomness::::get(), CurrentSlot::::get(), EpochIndex::::get(), ); @@ -510,7 +500,7 @@ impl FindAuthor for Pallet { impl IsMember for Pallet { fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities().iter().any(|id| &id.0 == authority_id) + Authorities::::get().iter().any(|id| &id.0 == authority_id) } } @@ -526,6 +516,47 @@ impl pallet_session::ShouldEndSession> for Pallet Pallet { + /// Public function to access epoch_index storage. + pub fn epoch_index() -> u64 { + EpochIndex::::get() + } + /// Public function to access authorities storage. + pub fn authorities() -> WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities> { + Authorities::::get() + } + /// Public function to access genesis_slot storage. + pub fn genesis_slot() -> Slot { + GenesisSlot::::get() + } + /// Public function to access current_slot storage. + pub fn current_slot() -> Slot { + CurrentSlot::::get() + } + /// Public function to access randomness storage. + pub fn randomness() -> BabeRandomness { + Randomness::::get() + } + /// Public function to access initialized storage. + pub fn initialized() -> Option> { + Initialized::::get() + } + /// Public function to access author_vrf_randomness storage. + pub fn author_vrf_randomness() -> Option { + AuthorVrfRandomness::::get() + } + /// Public function to access lateness storage. + pub fn lateness() -> BlockNumberFor { + Lateness::::get() + } + /// Public function to access epoch_config storage. 
+ pub fn epoch_config() -> Option { + EpochConfig::::get() + } + /// Public function to access skipped_epochs storage. + pub fn skipped_epochs() -> BoundedVec<(u64, SessionIndex), ConstU32<100>> { + SkippedEpochs::::get() + } + /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within @@ -588,7 +619,7 @@ impl Pallet { ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. - debug_assert!(Self::initialized().is_some()); + debug_assert!(Initialized::::get().is_some()); if authorities.is_empty() { log::warn!(target: LOG_TARGET, "Ignoring empty epoch change."); @@ -655,7 +686,7 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update the start blocks of the previous and new current epoch. - >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { + EpochStart::::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); *current_epoch_start_block = >::block_number(); }); @@ -701,8 +732,8 @@ impl Pallet { epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), config: EpochConfig::::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), } @@ -779,8 +810,8 @@ impl Pallet { // we use the same values as genesis because we haven't collected any // randomness yet. let next = NextEpochDescriptor { - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), }; Self::deposit_consensus(ConsensusLog::NextEpochData(next)); @@ -789,7 +820,7 @@ impl Pallet { fn initialize(now: BlockNumberFor) { // since `initialize` can be called twice (e.g. 
if session module is present)
 		// let's ensure that we only do the initialization once per block
-		let initialized = Self::initialized().is_some();
+		let initialized = Initialized::<T>::get().is_some();
 		if initialized {
 			return
 		}
@@ -940,7 +971,7 @@ impl<T: Config> frame_support::traits::EstimateNextSessionRotation<BlockNumberFor<T>>
 impl<T: Config> frame_support::traits::Lateness<BlockNumberFor<T>> for Pallet<T> {
 	fn lateness(&self) -> BlockNumberFor<T> {
-		Self::lateness()
+		Lateness::<T>::get()
 	}
 }
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index be38e3e7e5db5..e193a2e3b6454 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -213,7 +213,7 @@ pub fn go_to_block(n: u64, s: u64) {
 
 /// Slots will grow accordingly to blocks
 pub fn progress_to_block(n: u64) {
-	let mut slot = u64::from(Babe::current_slot()) + 1;
+	let mut slot = u64::from(CurrentSlot::<Test>::get()) + 1;
 	for i in System::block_number() + 1..=n {
 		go_to_block(i, slot);
 		slot += 1;
@@ -272,7 +272,8 @@ pub fn make_vrf_signature_and_randomness(
 	slot: Slot,
 	pair: &sp_consensus_babe::AuthorityPair,
 ) -> (VrfSignature, Randomness) {
-	let transcript = sp_consensus_babe::make_vrf_transcript(&Babe::randomness(), slot, 0);
+	let transcript =
+		sp_consensus_babe::make_vrf_transcript(&pallet_babe::Randomness::<Test>::get(), slot, 0);
 
 	let randomness =
 		pair.as_ref().make_bytes(sp_consensus_babe::RANDOMNESS_VRF_CONTEXT, &transcript);
diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs
index e65f1844f88f9..b9a214ca105c8 100644
--- a/substrate/frame/babe/src/tests.rs
+++ b/substrate/frame/babe/src/tests.rs
@@ -43,7 +43,7 @@ fn empty_randomness_is_correct() {
 
 #[test]
 fn initial_values() {
-	new_test_ext(4).execute_with(|| assert_eq!(Babe::authorities().len(), 4))
+	new_test_ext(4).execute_with(|| assert_eq!(Authorities::<Test>::get().len(), 4))
 }
 
 #[test]
@@ -68,25 +68,25 @@ fn first_block_epoch_zero_start() {
 
 		let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_signature);
 
-		assert_eq!(Babe::genesis_slot(), Slot::from(0));
+		assert_eq!(GenesisSlot::<Test>::get(), Slot::from(0));
 		System::reset_events();
 		System::initialize(&1, &Default::default(), &pre_digest);
 
 		// see implementation of the function for details why: we issue an
 		// epoch-change digest but don't do it via the normal session mechanism.
 		assert!(!Babe::should_end_session(1));
-		assert_eq!(Babe::genesis_slot(), genesis_slot);
-		assert_eq!(Babe::current_slot(), genesis_slot);
-		assert_eq!(Babe::epoch_index(), 0);
+		assert_eq!(GenesisSlot::<Test>::get(), genesis_slot);
+		assert_eq!(CurrentSlot::<Test>::get(), genesis_slot);
+		assert_eq!(EpochIndex::<Test>::get(), 0);
 
 		Babe::on_finalize(1);
 		let header = System::finalize();
 
-		assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), Some(vrf_randomness));
 
 		assert_eq!(SegmentIndex::<Test>::get(), 0);
 		assert_eq!(UnderConstruction::<Test>::get(0), vec![vrf_randomness]);
-		assert_eq!(Babe::randomness(), [0; 32]);
-		assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
+		assert_eq!(Randomness::<Test>::get(), [0; 32]);
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), Some(vrf_randomness));
 		assert_eq!(NextRandomness::<Test>::get(), [0; 32]);
 
 		assert_eq!(header.digest.logs.len(), 2);
@@ -95,8 +95,8 @@ fn first_block_epoch_zero_start() {
 
 		let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData(
 			sp_consensus_babe::digests::NextEpochDescriptor {
-				authorities: Babe::authorities().into_inner(),
-				randomness: Babe::randomness(),
+				authorities: Authorities::<Test>::get().into_inner(),
+				randomness: Randomness::<Test>::get(),
 			},
 		);
 		let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode());
@@ -118,19 +118,19 @@ fn current_slot_is_processed_on_initialization() {
 		System::reset_events();
 		System::initialize(&1, &Default::default(), &pre_digest);
 
-		assert_eq!(Babe::current_slot(), Slot::from(0));
-		assert!(Babe::initialized().is_none());
+		assert_eq!(CurrentSlot::<Test>::get(), Slot::from(0));
+		assert!(Initialized::<Test>::get().is_none());
 
 		// current slot is updated on initialization
 		Babe::initialize(1);
-		assert_eq!(Babe::current_slot(), genesis_slot);
-		assert!(Babe::initialized().is_some());
+		assert_eq!(CurrentSlot::<Test>::get(), genesis_slot);
+		assert!(Initialized::<Test>::get().is_some());
 
 		// but author vrf randomness isn't
-		assert_eq!(Babe::author_vrf_randomness(), None);
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), None);
 
 		// instead it is updated on block finalization
 		Babe::on_finalize(1);
-		assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), Some(vrf_randomness));
 	})
 }
@@ -151,16 +151,16 @@ where
 
 		// author vrf randomness is not updated on initialization
 		Babe::initialize(1);
-		assert_eq!(Babe::author_vrf_randomness(), None);
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), None);
 
 		// instead it is updated on block finalization to account for any
 		// epoch changes that might happen during the block
 		Babe::on_finalize(1);
-		assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), Some(vrf_randomness));
 
 		// and it is kept after finalizing the block
 		System::finalize();
-		assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), Some(vrf_randomness));
 	})
 }
@@ -182,14 +182,14 @@ fn no_author_vrf_output_for_secondary_plain() {
 		System::reset_events();
 		System::initialize(&1, &Default::default(), &secondary_plain_pre_digest);
 
-		assert_eq!(Babe::author_vrf_randomness(), None);
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), None);
 
 		Babe::initialize(1);
-		assert_eq!(Babe::author_vrf_randomness(), None);
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), None);
 
 		Babe::on_finalize(1);
 		System::finalize();
-		assert_eq!(Babe::author_vrf_randomness(), None);
+		assert_eq!(AuthorVrfRandomness::<Test>::get(), None);
 	})
 }
@@ -210,14 +210,14 @@ fn can_predict_next_epoch_change() {
 		assert_eq!(<Test as Config>::EpochDuration::get(), 3);
 		// this sets the genesis slot to 6;
 		go_to_block(1, 6);
-		assert_eq!(*Babe::genesis_slot(), 6);
-		assert_eq!(*Babe::current_slot(), 6);
-		assert_eq!(Babe::epoch_index(), 0);
+		assert_eq!(*GenesisSlot::<Test>::get(), 6);
+		assert_eq!(*CurrentSlot::<Test>::get(), 6);
+		assert_eq!(EpochIndex::<Test>::get(), 0);
 
 		progress_to_block(5);
 
-		assert_eq!(Babe::epoch_index(), 5 / 3);
-		assert_eq!(*Babe::current_slot(), 10);
+		assert_eq!(EpochIndex::<Test>::get(), 5 / 3);
+		assert_eq!(*CurrentSlot::<Test>::get(), 10);
 
 		// next epoch change will be at
 		assert_eq!(*Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now
@@ -266,9 +266,9 @@ fn can_enact_next_config() {
 		assert_eq!(<Test as Config>::EpochDuration::get(), 3);
 		// this sets the genesis slot to 6;
 		go_to_block(1, 6);
-		assert_eq!(*Babe::genesis_slot(), 6);
-		assert_eq!(*Babe::current_slot(), 6);
-		assert_eq!(Babe::epoch_index(), 0);
+		assert_eq!(*GenesisSlot::<Test>::get(), 6);
+		assert_eq!(*CurrentSlot::<Test>::get(), 6);
+		assert_eq!(EpochIndex::<Test>::get(), 0);
 		go_to_block(2, 7);
 
 		let current_config = BabeEpochConfiguration {
@@ -431,7 +431,7 @@ fn report_equivocation_current_session_works() {
 	ext.execute_with(|| {
 		start_era(1);
 
-		let authorities = Babe::authorities();
+		let authorities = Authorities::<Test>::get();
 		let validators = Session::validators();
 
 		// make sure that all authorities have the same balance
@@ -508,7 +508,7 @@ fn report_equivocation_old_session_works() {
 	ext.execute_with(|| {
 		start_era(1);
 
-		let authorities = Babe::authorities();
+		let authorities = Authorities::<Test>::get();
 
 		// we will use the validator at index 0 as the offending authority
 		let offending_validator_index = 1;
@@ -566,7 +566,7 @@ fn report_equivocation_invalid_key_owner_proof() {
 	ext.execute_with(|| {
 		start_era(1);
 
-		let authorities = Babe::authorities();
+		let authorities = Authorities::<Test>::get();
 
 		// we will use the validator at index 0 as the offending authority
 		let offending_validator_index = 0;
@@ -629,7 +629,7 @@ fn report_equivocation_invalid_equivocation_proof() {
 	ext.execute_with(|| {
 		start_era(1);
 
-		let authorities = Babe::authorities();
+		let authorities = Authorities::<Test>::get();
 
 		// we will use the validator at index 0 as the offending authority
 		let offending_validator_index = 0;
@@ -734,7 +734,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 	ext.execute_with(|| {
 		start_era(1);
 
-		let authorities = Babe::authorities();
+		let authorities = Authorities::<Test>::get();
 
 		// generate and report an equivocation for the validator at index 0
 		let offending_validator_index = 0;
@@ -848,7 +848,7 @@ fn report_equivocation_after_skipped_epochs_works() {
 		assert_eq!(SkippedEpochs::<Test>::get(), vec![(10, 1)]);
 
 		// generate an equivocation proof for validator at index 1
-		let authorities = Babe::authorities();
+		let authorities = Authorities::<Test>::get();
 		let offending_validator_index = 1;
 		let offending_authority_pair = pairs
 			.into_iter()
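The BABE changes above all follow the same migration: the `#[pallet::getter]` accessors (`Babe::current_slot()`, `Babe::authorities()`, …) are replaced by direct reads of the pallet's storage items. A minimal sketch of the resulting call-site pattern, not taken from this patch, assuming a runtime `T` that implements `pallet_babe::Config`, that the storage items remain `pub`, and that the code runs inside externalities (e.g. a test's `execute_with`):

```rust
use sp_consensus_babe::Slot;

/// Read a couple of BABE storage items the way the updated tests do:
/// through the public storage types, parameterised by the runtime,
/// instead of the removed getter functions.
fn babe_snapshot<T: pallet_babe::Config>() -> (Slot, u64) {
	let slot = pallet_babe::CurrentSlot::<T>::get(); // was `Babe::current_slot()`
	let epoch = pallet_babe::EpochIndex::<T>::get(); // was `Babe::epoch_index()`
	(slot, epoch)
}
```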
diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs
index b8e985648983e..a113cb01c982d 100644
--- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs
+++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs
@@ -58,13 +58,18 @@
 //! 3 holds for 100 units, the account can spend its funds for any reason down to 300 units, at
 //! which point the holds will start to come into play.
 //!
-//! - **Frozen Balance**: A freeze on a specified amount of an account's free balance until a
-//!   specified block number.
+//! - **Frozen Balance**: A freeze on a specified amount of an account's balance. Tokens that are
+//!   frozen cannot be transferred.
 //!
 //! Multiple freezes always operate over the same funds, so they "overlay" rather than
 //! "stack". This means that if an account has 3 freezes for 100 units, the account can spend its
 //! funds for any reason down to 100 units, at which point the freezes will start to come into
 //! play.
+//!
+//! It's important to note that the frozen balance can exceed the total balance of the account.
+//! This is useful, e.g., in cases where you want to prevent a user from transferring any funds.
+//! In such a case, setting the frozen balance to `Balance::MAX` would serve that purpose
+//! effectively.
 //!
 //! - **Minimum Balance (a.k.a. Existential Deposit, a.k.a. ED)**: The minimum balance required to
 //!   create or keep an account open. This is to prevent "dust accounts" from filling storage. When
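A short illustration of the behaviour the new doc text describes. This sketch is not part of the patch; it assumes a fungible implementation `F` with freeze support (e.g. `pallet_balances`) and an already-registered freeze identifier `id`:

```rust
use frame_support::traits::fungible::MutateFreeze;
use sp_runtime::{traits::Bounded, DispatchResult};

/// Prevent `who` from transferring anything: freezes "overlay" rather than
/// "stack", and a freeze may exceed the account's total balance, so freezing
/// `Balance::MAX` locks the account regardless of any later deposits.
fn lock_account<F, AccountId>(id: &F::Id, who: &AccountId) -> DispatchResult
where
	F: MutateFreeze<AccountId>,
	F::Balance: Bounded,
{
	F::set_freeze(id, who, F::Balance::max_value())
}
```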
diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs
index 398cb350c501e..6e58ee3e585ab 100644
--- a/substrate/frame/transaction-storage/src/lib.rs
+++ b/substrate/frame/transaction-storage/src/lib.rs
@@ -159,11 +159,11 @@ pub mod pallet {
 		fn on_initialize(n: BlockNumberFor<T>) -> Weight {
 			// Drop obsolete roots. The proof for `obsolete` will be checked later
 			// in this block, so we drop `obsolete` - 1.
-			let period = <StoragePeriod<T>>::get();
+			let period = StoragePeriod::<T>::get();
 			let obsolete = n.saturating_sub(period.saturating_add(One::one()));
 			if obsolete > Zero::zero() {
-				<Transactions<T>>::remove(obsolete);
-				<ChunkCount<T>>::remove(obsolete);
+				Transactions::<T>::remove(obsolete);
+				ChunkCount::<T>::remove(obsolete);
 			}
 			// 2 writes in `on_initialize` and 2 writes + 2 reads in `on_finalize`
 			T::DbWeight::get().reads_writes(2, 4)
@@ -171,21 +171,21 @@ pub mod pallet {
 		fn on_finalize(n: BlockNumberFor<T>) {
 			assert!(
-				<ProofChecked<T>>::take() || {
+				ProofChecked::<T>::take() || {
 					// Proof is not required for early or empty blocks.
-					let number = <frame_system::Pallet<T>>::block_number();
-					let period = <StoragePeriod<T>>::get();
+					let number = frame_system::Pallet::<T>::block_number();
+					let period = StoragePeriod::<T>::get();
 					let target_number = number.saturating_sub(period);
-					target_number.is_zero() || <ChunkCount<T>>::get(target_number) == 0
+					target_number.is_zero() || ChunkCount::<T>::get(target_number) == 0
 				},
 				"Storage proof must be checked once in the block"
 			);
 			// Insert new transactions
-			let transactions = <BlockTransactions<T>>::take();
+			let transactions = BlockTransactions::<T>::take();
 			let total_chunks = transactions.last().map_or(0, |t| t.block_chunks);
 			if total_chunks != 0 {
-				<ChunkCount<T>>::insert(n, total_chunks);
-				<Transactions<T>>::insert(n, transactions);
+				ChunkCount::<T>::insert(n, total_chunks);
+				Transactions::<T>::insert(n, transactions);
 			}
 		}
 	}
@@ -215,11 +215,11 @@ pub mod pallet {
 			let content_hash = sp_io::hashing::blake2_256(&data);
 			let extrinsic_index =
-				<frame_system::Pallet<T>>::extrinsic_index().ok_or(Error::<T>::BadContext)?;
+				frame_system::Pallet::<T>::extrinsic_index().ok_or(Error::<T>::BadContext)?;
 			sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash);
 
 			let mut index = 0;
-			<BlockTransactions<T>>::mutate(|transactions| {
+			BlockTransactions::<T>::mutate(|transactions| {
 				if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize {
 					return Err(Error::<T>::TooManyTransactions)
 				}
@@ -253,17 +253,17 @@ pub mod pallet {
 			index: u32,
 		) -> DispatchResultWithPostInfo {
 			let sender = ensure_signed(origin)?;
-			let transactions = <Transactions<T>>::get(block).ok_or(Error::<T>::RenewedNotFound)?;
+			let transactions = Transactions::<T>::get(block).ok_or(Error::<T>::RenewedNotFound)?;
 			let info = transactions.get(index as usize).ok_or(Error::<T>::RenewedNotFound)?;
 			let extrinsic_index =
-				<frame_system::Pallet<T>>::extrinsic_index().ok_or(Error::<T>::BadContext)?;
+				frame_system::Pallet::<T>::extrinsic_index().ok_or(Error::<T>::BadContext)?;
 
 			Self::apply_fee(sender, info.size)?;
 
 			sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into());
 
 			let mut index = 0;
-			<BlockTransactions<T>>::mutate(|transactions| {
+			BlockTransactions::<T>::mutate(|transactions| {
 				if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize {
 					return Err(Error::<T>::TooManyTransactions)
 				}
@@ -297,15 +297,15 @@ pub mod pallet {
 		) -> DispatchResultWithPostInfo {
 			ensure_none(origin)?;
 			ensure!(!ProofChecked::<T>::get(), Error::<T>::DoubleCheck);
-			let number = <frame_system::Pallet<T>>::block_number();
-			let period = <StoragePeriod<T>>::get();
+			let number = frame_system::Pallet::<T>::block_number();
+			let period = StoragePeriod::<T>::get();
 			let target_number = number.saturating_sub(period);
 			ensure!(!target_number.is_zero(), Error::<T>::UnexpectedProof);
-			let total_chunks = <ChunkCount<T>>::get(target_number);
+			let total_chunks = ChunkCount::<T>::get(target_number);
 			ensure!(total_chunks != 0, Error::<T>::UnexpectedProof);
-			let parent_hash = <frame_system::Pallet<T>>::parent_hash();
+			let parent_hash = frame_system::Pallet::<T>::parent_hash();
 			let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks);
-			let (info, chunk_index) = match <Transactions<T>>::get(target_number) {
+			let (info, chunk_index) = match Transactions::<T>::get(target_number) {
 				Some(infos) => {
 					let index = match infos
 						.binary_search_by_key(&selected_chunk_index, |info| info.block_chunks)
@@ -349,8 +349,7 @@ pub mod pallet {
 
 	/// Collection of transaction metadata by block number.
 	#[pallet::storage]
-	#[pallet::getter(fn transaction_roots)]
-	pub(super) type Transactions<T: Config> = StorageMap<
+	pub type Transactions<T: Config> = StorageMap<
 		_,
 		Blake2_128Concat,
 		BlockNumberFor<T>,
@@ -360,32 +359,30 @@ pub mod pallet {
 
 	/// Count indexed chunks for each block.
 	#[pallet::storage]
-	pub(super) type ChunkCount<T: Config> =
+	pub type ChunkCount<T: Config> =
 		StorageMap<_, Blake2_128Concat, BlockNumberFor<T>, u32, ValueQuery>;
 
 	#[pallet::storage]
-	#[pallet::getter(fn byte_fee)]
 	/// Storage fee per byte.
-	pub(super) type ByteFee<T: Config> = StorageValue<_, BalanceOf<T>>;
+	pub type ByteFee<T: Config> = StorageValue<_, BalanceOf<T>>;
 
 	#[pallet::storage]
-	#[pallet::getter(fn entry_fee)]
 	/// Storage fee per transaction.
-	pub(super) type EntryFee<T: Config> = StorageValue<_, BalanceOf<T>>;
+	pub type EntryFee<T: Config> = StorageValue<_, BalanceOf<T>>;
 
 	/// Storage period for data in blocks. Should match `sp_storage_proof::DEFAULT_STORAGE_PERIOD`
 	/// for block authoring.
 	#[pallet::storage]
-	pub(super) type StoragePeriod<T: Config> = StorageValue<_, BlockNumberFor<T>, ValueQuery>;
+	pub type StoragePeriod<T: Config> = StorageValue<_, BlockNumberFor<T>, ValueQuery>;
 
 	// Intermediates
 	#[pallet::storage]
-	pub(super) type BlockTransactions<T: Config> =
+	pub type BlockTransactions<T: Config> =
 		StorageValue<_, BoundedVec<TransactionInfo, T::MaxBlockTransactions>, ValueQuery>;
 
 	/// Was the proof checked in this block?
 	#[pallet::storage]
-	pub(super) type ProofChecked<T: Config> = StorageValue<_, bool, ValueQuery>;
+	pub type ProofChecked<T: Config> = StorageValue<_, bool, ValueQuery>;
 
 	#[pallet::genesis_config]
 	pub struct GenesisConfig<T: Config> {
@@ -407,9 +404,9 @@
 	#[pallet::genesis_build]
 	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
 		fn build(&self) {
-			<ByteFee<T>>::put(&self.byte_fee);
-			<EntryFee<T>>::put(&self.entry_fee);
-			<StoragePeriod<T>>::put(&self.storage_period);
+			ByteFee::<T>::put(&self.byte_fee);
+			EntryFee::<T>::put(&self.entry_fee);
+			StoragePeriod::<T>::put(&self.storage_period);
 		}
 	}
 
@@ -439,6 +436,21 @@ pub mod pallet {
 	}
 
 	impl<T: Config> Pallet<T> {
+		/// Get transaction storage information from outside of this pallet.
+		pub fn transaction_roots(
+			block: BlockNumberFor<T>,
+		) -> Option<BoundedVec<TransactionInfo, T::MaxBlockTransactions>> {
+			Transactions::<T>::get(block)
+		}
+
+		/// Get ByteFee storage information from outside of this pallet.
+		pub fn byte_fee() -> Option<BalanceOf<T>> {
+			ByteFee::<T>::get()
+		}
+
+		/// Get EntryFee storage information from outside of this pallet.
+		pub fn entry_fee() -> Option<BalanceOf<T>> {
+			EntryFee::<T>::get()
+		}
+
 		fn apply_fee(sender: T::AccountId, size: u32) -> DispatchResult {
 			let byte_fee = ByteFee::<T>::get().ok_or(Error::<T>::NotConfigured)?;
 			let entry_fee = EntryFee::<T>::get().ok_or(Error::<T>::NotConfigured)?;
diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs
index 621f74804ecca..b725990e6e121 100644
--- a/substrate/frame/transaction-storage/src/tests.rs
+++ b/substrate/frame/transaction-storage/src/tests.rs
@@ -40,9 +40,9 @@ fn discards_data() {
 			vec![0u8; 2000 as usize]
 		));
 		let proof_provider = || {
-			let block_num = <frame_system::Pallet<Test>>::block_number();
+			let block_num = frame_system::Pallet::<Test>::block_number();
 			if block_num == 11 {
-				let parent_hash = <frame_system::Pallet<Test>>::parent_hash();
+				let parent_hash = frame_system::Pallet::<Test>::parent_hash();
 				Some(
 					build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]])
 						.unwrap(),
@@ -92,7 +92,7 @@ fn checks_proof() {
 			vec![0u8; MAX_DATA_SIZE as usize]
 		));
 		run_to_block(10, || None);
-		let parent_hash = <frame_system::Pallet<Test>>::parent_hash();
+		let parent_hash = frame_system::Pallet::<Test>::parent_hash();
 		let proof =
 			build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap();
 		assert_noop!(
@@ -100,7 +100,7 @@ fn checks_proof() {
 			Error::<Test>::UnexpectedProof,
 		);
 		run_to_block(11, || None);
-		let parent_hash = <frame_system::Pallet<Test>>::parent_hash();
+		let parent_hash = frame_system::Pallet::<Test>::parent_hash();
 
 		let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap();
 		assert_noop!(
@@ -132,9 +132,9 @@ fn renews_data() {
 		));
 		assert_eq!(Balances::free_balance(1), 1_000_000_000 - 4000 * 2 - 200 * 2);
 		let proof_provider = || {
-			let block_num = <frame_system::Pallet<Test>>::block_number();
+			let block_num = frame_system::Pallet::<Test>::block_number();
 			if block_num == 11 || block_num == 16 {
-				let parent_hash = <frame_system::Pallet<Test>>::parent_hash();
+				let parent_hash = frame_system::Pallet::<Test>::parent_hash();
 				Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000]]).unwrap())
 			} else {
 				None
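The `transaction-storage` changes show the general shape of this PR: drop `#[pallet::getter]`, make the storage items `pub`, and add an explicit accessor only where outside callers still need one. A minimal, self-contained sketch of that pattern (names are illustrative, not from this patch):

```rust
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	// Previously `#[pallet::getter(fn byte_fee)]` would have generated
	// `Pallet::<T>::byte_fee()`; now the storage item is simply public.
	#[pallet::storage]
	pub type ByteFee<T: Config> = StorageValue<_, u32>;

	impl<T: Config> Pallet<T> {
		/// Explicit accessor kept for external callers that used the old getter.
		pub fn byte_fee() -> Option<u32> {
			ByteFee::<T>::get()
		}
	}
}
```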
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs
index 098bd135bfebb..78ec92e4be98d 100644
--- a/substrate/primitives/core/src/lib.rs
+++ b/substrate/primitives/core/src/lib.rs
@@ -430,16 +430,7 @@ pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
 macro_rules! generate_feature_enabled_macro {
 	( $macro_name:ident, $feature_name:meta, $d:tt ) => {
 		$crate::paste::paste!{
-			/// Enable/disable the given code depending on
-			#[doc = concat!("`", stringify!($feature_name), "`")]
-			/// being enabled for the crate or not.
-			///
-			/// # Example
-			///
-			/// ```nocompile
-			/// // Will add the code depending on the feature being enabled or not.
-			#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
-			/// ```
 			#[cfg($feature_name)]
 			#[macro_export]
 			macro_rules! [<_ $macro_name>] {
@@ -448,6 +439,13 @@ macro_rules! generate_feature_enabled_macro {
 				}
 			}
 
+			///
+			#[cfg(not($feature_name))]
+			#[macro_export]
+			macro_rules! [<_ $macro_name>] {
+				( $d ( $d input:tt )* ) => {};
+			}
+
 			/// Enable/disable the given code depending on
 			#[doc = concat!("`", stringify!($feature_name), "`")]
 			/// being enabled for the crate or not.
 			///
 			/// # Example
 			///
@@ -458,15 +456,8 @@
 			/// // Will add the code depending on the feature being enabled or not.
 			#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
 			/// ```
-			#[cfg(not($feature_name))]
-			#[macro_export]
-			macro_rules! [<_ $macro_name>] {
-				( $d ( $d input:tt )* ) => {};
-			}
-
-			// Work around for: <https://github.com/rust-lang/rust/pull/52234>
-			#[doc(hidden)]
-			pub use [<_ $macro_name>] as $macro_name;
+			// https://github.com/rust-lang/rust/pull/52234
+			pub use [<_ $macro_name>] as $macro_name;
 		}
 	};
 }
diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml
index 4fafe45ff28d4..83b9422ca1d88 100644
--- a/substrate/primitives/runtime/Cargo.toml
+++ b/substrate/primitives/runtime/Cargo.toml
@@ -34,6 +34,7 @@ sp-io = { workspace = true }
 sp-std = { workspace = true }
 sp-weights = { workspace = true }
 docify = { workspace = true }
+tracing = { workspace = true, features = ["log"], default-features = false }
 
 simple-mermaid = { version = "0.1.1", optional = true }
 
@@ -69,6 +70,7 @@ std = [
 	"sp-std/std",
 	"sp-tracing/std",
 	"sp-weights/std",
+	"tracing/std",
 ]
 
 # Serde support without relying on std features.
diff --git a/substrate/primitives/runtime/src/runtime_logger.rs b/substrate/primitives/runtime/src/runtime_logger.rs
index b7374b8b6f6c8..6f1e738eb0e7a 100644
--- a/substrate/primitives/runtime/src/runtime_logger.rs
+++ b/substrate/primitives/runtime/src/runtime_logger.rs
@@ -66,16 +66,15 @@ impl log::Log for RuntimeLogger {
 #[cfg(test)]
 mod tests {
 	use sp_api::ProvideRuntimeApi;
-	use std::{env, str::FromStr};
+	use std::env;
 	use substrate_test_runtime_client::{
 		runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
 	};
 
 	#[test]
-	fn ensure_runtime_logger_respects_host_max_log_level() {
+	fn ensure_runtime_logger_works() {
 		if env::var("RUN_TEST").is_ok() {
 			sp_tracing::try_init_simple();
-			log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap());
 
 			let client = TestClientBuilder::new().build();
 			let runtime_api = client.runtime_api();
@@ -83,17 +82,19 @@ mod tests {
 				.do_trace_log(client.chain_info().genesis_hash)
 				.expect("Logging should not fail");
 		} else {
-			for (level, should_print) in &[("trace", true), ("info", false)] {
+			for (level, should_print) in &[("test=trace", true), ("info", false)] {
 				let executable = std::env::current_exe().unwrap();
 				let output = std::process::Command::new(executable)
 					.env("RUN_TEST", "1")
 					.env("RUST_LOG", level)
-					.args(&["--nocapture", "ensure_runtime_logger_respects_host_max_log_level"])
+					.args(&["--nocapture", "ensure_runtime_logger_works"])
 					.output()
 					.unwrap();
 
 				let output = String::from_utf8(output.stderr).unwrap();
 				assert!(output.contains("Hey I'm runtime") == *should_print);
+				assert!(output.contains("THIS IS TRACING") == *should_print);
+				assert!(output.contains("Hey, I'm tracing") == *should_print);
 			}
 		}
 	}
diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml
index e3282f8e598ed..5c8f49a6db859 100644
--- a/substrate/test-utils/runtime/Cargo.toml
+++ b/substrate/test-utils/runtime/Cargo.toml
@@ -53,7 +53,7 @@ sp-externalities = { workspace = true }
 array-bytes = { optional = true, workspace = true, default-features = true }
 serde_json = { workspace = true, features = ["alloc"] }
 log = { workspace = true }
-hex-literal = { workspace = true, default-features = true }
+tracing = { workspace = true, default-features = false }
 
 [dev-dependencies]
 futures = { workspace = true }
@@ -112,6 +112,7 @@ std = [
 	"sp-trie/std",
 	"sp-version/std",
 	"substrate-wasm-builder",
+	"tracing/std",
 	"trie-db/std",
 ]
 
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index a5769b56dd552..a13441302e4df 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -596,7 +596,11 @@ impl_runtime_apis! {
 		}
 
 		fn do_trace_log() {
-			log::trace!("Hey I'm runtime");
+			log::trace!(target: "test", "Hey I'm runtime");
+
+			let data = "THIS IS TRACING";
+
+			tracing::trace!(target: "test", %data, "Hey, I'm tracing");
 		}
 
 		fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec<u8>) -> bool {
diff --git a/templates/minimal/README.md b/templates/minimal/README.md
index 9d27bcc994414..eaa21a05ee89b 100644
--- a/templates/minimal/README.md
+++ b/templates/minimal/README.md
@@ -14,7 +14,7 @@
 * 🤏 This template is a minimal (in terms of complexity and the number of components) template for
   building a blockchain node.
 
-* 🔧 Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets
+* 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets
   such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html).
 
 * 👤 The template has no consensus configured - it is best for experimenting with a single node network.
@@ -57,7 +57,7 @@ and don't forget to also customize the corresponding comments.
 🔨 Use the following command to build the node without launching it:
 
 ```sh
-cargo build --release
+cargo build --package minimal-template-node --release
 ```
 
 🐳 Alternatively, build the docker image:
@@ -80,7 +80,7 @@ docker run --rm polkadot-sdk-minimal-template --dev
 Development chains:
 
 * 🧹 Do not persist the state.
-* 💰 Are preconfigured with a genesis state that includes several prefunded development accounts.
+* 💰 Are preconfigured with a genesis state that includes several pre-funded development accounts.
 * 🧑‍⚖️ Development accounts are used as `sudo` accounts.
 
 ### Connect with the Polkadot-JS Apps Front-End
diff --git a/templates/parachain/README.md b/templates/parachain/README.md
index a6ac91799b777..802d8586b39e0 100644
--- a/templates/parachain/README.md
+++ b/templates/parachain/README.md
@@ -16,7 +16,7 @@
 * ☁️ It is based on the [Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework.
 
-* 🔧 Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets
+* 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets
   such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html).
 
 * 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains)
@@ -44,7 +44,7 @@ packages required to compile this template - please take note of the Rust compil
 🔨 Use the following command to build the node without launching it:
 
 ```sh
-cargo build --release
+cargo build --package parachain-template-node --release
 ```
 
 🐳 Alternatively, build the docker image:
@@ -70,7 +70,7 @@ and `zombienet` - into `PATH` like so:
 
 ```sh
 export PATH="./target/release/:$PATH"
 ```
 
-This way, we can conveniently use them un the following steps.
+This way, we can conveniently use them in the following steps.
 
 👥 The following command starts a local development chain, with a single relay chain node and a
 single parachain collator:
 
diff --git a/templates/parachain/node/README.md b/templates/parachain/node/README.md
index 350272c7b6efe..ad309d4015aab 100644
--- a/templates/parachain/node/README.md
+++ b/templates/parachain/node/README.md
@@ -7,7 +7,7 @@
 
 ⚙️ It acts as a remote procedure call (RPC) server, allowing interaction with the blockchain.
 
-👉 Learn more about the architecture, and a difference between a node and a runtime
+👉 Learn more about the architecture, and the difference between a node and a runtime
 [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/wasm_meta_protocol/index.html).
 
 👇 Here are the most important files in this node template:
diff --git a/templates/solochain/README.md b/templates/solochain/README.md
index 2e3b1146a8fde..c5dc5db7f3b51 100644
--- a/templates/solochain/README.md
+++ b/templates/solochain/README.md
@@ -28,7 +28,7 @@ installation](#alternatives-installations) options.
 Use the following command to build the node without launching it:
 
 ```sh
-cargo build --release
+cargo build --package solochain-template-node --release
 ```
 
 ### Embedded Docs
@@ -37,7 +37,7 @@ After you build the project, you can use the following command to explore its
 parameters and subcommands:
 
 ```sh
-./target/release/node-template -h
+./target/release/solochain-template-node -h
 ```
 
 You can generate and view the [Rust
@@ -54,19 +54,19 @@ The following command starts a single-node development chain that doesn't
 persist state:
 
 ```sh
-./target/release/node-template --dev
+./target/release/solochain-template-node --dev
 ```
 
 To purge the development chain's state, run the following command:
 
 ```sh
-./target/release/node-template purge-chain --dev
+./target/release/solochain-template-node purge-chain --dev
 ```
 
 To start the development chain with detailed logging, run the following
 command:
 
 ```sh
-RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev
+RUST_BACKTRACE=1 ./target/release/solochain-template-node -ldebug --dev
 ```
 
 Development chains:
@@ -75,7 +75,7 @@ Development chains:
 - Use the **Alice** and **Bob** accounts as default validator authorities.
 - Use the **Alice** account as the default `sudo` account.
 - Are preconfigured with a genesis state (`/node/src/chain_spec.rs`) that
-  includes several prefunded development accounts.
+  includes several pre-funded development accounts.
 
 To persist chain state between runs, specify a base path by running a command
@@ -86,7 +86,7 @@ similar to the following:
 $ mkdir my-chain-state
 
 // Use of that folder to store the chain state
-$ ./target/release/node-template --dev --base-path ./my-chain-state/
+$ ./target/release/solochain-template-node --dev --base-path ./my-chain-state/
 
 // Check the folder structure created inside the base path after running the chain
 $ ls ./my-chain-state
@@ -142,7 +142,7 @@ following:
   file that defines a Substrate chain's initial (genesis) state. Chain
   specifications are useful for development and testing, and critical when
   architecting the launch of a production chain. Take note of the
-  `development_config` and `testnet_genesis` functions,. These functions are
+  `development_config` and `testnet_genesis` functions. These functions are
   used to define the genesis state for the local development chain
   configuration. These functions identify some
   [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and