From f0eec07f93759331e6520ccc67f3d3291f0122c4 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:38:52 +0200 Subject: [PATCH 1/4] Increase the number of pvf execute workers (#7116) Reference hardware requirements have been bumped to at least 8 cores, so we can now allocate 50% of that capacity to PVF execution. --------- Signed-off-by: Alexandru Gheorghe --- polkadot/node/service/src/lib.rs | 11 +++-------- prdoc/pr_7116.prdoc | 8 ++++++++ 2 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_7116.prdoc diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 227bc5253994..820cce8d083a 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -944,14 +944,9 @@ pub fn new_full< secure_validator_mode, prep_worker_path, exec_worker_path, - pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else( - || match config.chain_spec.identify_chain() { - // The intention is to use this logic for gradual increasing from 2 to 4 - // of this configuration chain by chain until it reaches production chain. - Chain::Polkadot | Chain::Kusama => 2, - Chain::Rococo | Chain::Westend | Chain::Unknown => 4, - }, - ), + // Default execution workers is 4 because we have 8 cores on the reference hardware, + // and this accounts for 50% of that cpu capacity. + pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4), pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1), pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2), }) diff --git a/prdoc/pr_7116.prdoc b/prdoc/pr_7116.prdoc new file mode 100644 index 000000000000..95a5254778a4 --- /dev/null +++ b/prdoc/pr_7116.prdoc @@ -0,0 +1,8 @@ +title: Increase the number of pvf execution workers from 2 to 4 +doc: +- audience: Node Dev + description: |- + Increase the number of pvf execution workers from 2 to 4. +crates: +- name: polkadot-service + bump: patch From 0e0fa4782e2872ea74d8038ebedb9f6e6be53457 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:42:22 +0100 Subject: [PATCH 2/4] `fatxpool`: rotator cache size now depends on pool's limits (#7102) # Description This PR modifies the hard-coded size of the extrinsics cache within [`PoolRotator`](https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/rotator.rs#L36-L45) to be in line with the pool's limits. The problem was that the hard-coded size was small compared to the number of txs in a single block: https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/rotator.rs#L34 so an excessive number of unnecessary verifications was performed in `prune_tags`: https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/pool.rs#L369-L370 This resulted in quite long `prune_tags` execution times (which was OK for 6s blocks, but becomes noticeable for 2s blocks): ``` Pruning at HashAndNumber { number: 83, ... }. Resubmitting transactions: 6142, reverification took: 237.818955ms Pruning at HashAndNumber { number: 84, ... }. Resubmitting transactions: 5985, reverification took: 222.118218ms Pruning at HashAndNumber { number: 85, ... }. Resubmitting transactions: 5981, reverification took: 215.546847ms ``` The fix reduces the overhead: ``` Pruning at HashAndNumber { number: 92, ... }. Resubmitting transactions: 6325, reverification took: 14.728354ms Pruning at HashAndNumber { number: 93, ... }. Resubmitting transactions: 7030, reverification took: 23.973607ms Pruning at HashAndNumber { number: 94, ... }. Resubmitting transactions: 4465, reverification took: 9.532472ms ``` ## Review Notes I decided to leave the hard-coded `EXPECTED_SIZE` for the legacy transaction pool. Removing verification of transactions during re-submission may negatively impact the behavior of the legacy (single-state) pool. As we probably want to deprecate the old pool in the long term, I did not invest time in assessing the impact of the rotator change on the legacy pool's behavior.
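To make the sizing rule concrete, here is a minimal self-contained sketch. The `Limit`, `Options`, and `PoolRotator` types below are simplified stand-ins for illustration; only the `total_count` and `new_with_expected_size` names mirror what this PR actually adds:

```rust
use std::time::Duration;

// Simplified stand-ins for the real transaction-pool types (illustration only).
struct Limit { count: usize }
struct Options { ready: Limit, future: Limit, ban_time: Duration }

impl Options {
    // Mirrors the `Options::total_count` helper added by this PR:
    // total (ready + future) maximal number of transactions in the pool.
    fn total_count(&self) -> usize { self.ready.count + self.future.count }
}

struct PoolRotator { ban_time: Duration, expected_size: usize }

impl PoolRotator {
    // Mirrors `PoolRotator::new_with_expected_size`: the banned-extrinsics
    // cache now scales with the pool's limits instead of a hard-coded 2048.
    fn new_with_expected_size(ban_time: Duration, expected_size: usize) -> Self {
        Self { ban_time, expected_size }
    }
}

fn main() {
    let options = Options {
        ready: Limit { count: 8192 },
        future: Limit { count: 512 },
        ban_time: Duration::from_secs(30 * 60),
    };
    // The fork-aware pool builds its rotator from the pool limits, so
    // `prune_tags` no longer re-verifies transactions that were pushed out
    // of an undersized cache.
    let rotator = PoolRotator::new_with_expected_size(options.ban_time, options.total_count());
    assert_eq!(rotator.expected_size, 8704);
}
```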
--------- Co-authored-by: command-bot <> Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com> --- prdoc/pr_7102.prdoc | 8 +++ .../client/transaction-pool/benches/basics.rs | 12 ++++- .../transaction-pool/src/common/tests.rs | 2 +- .../src/fork_aware_txpool/dropped_watcher.rs | 4 +- .../fork_aware_txpool/fork_aware_txpool.rs | 2 +- .../client/transaction-pool/src/graph/pool.rs | 49 ++++++++++++++----- .../transaction-pool/src/graph/rotator.rs | 42 ++++++++++---- .../src/graph/validated_pool.rs | 31 ++++++++++-- .../src/single_state_txpool/revalidation.rs | 12 ++++- .../single_state_txpool.rs | 12 ++++- .../client/transaction-pool/tests/fatp.rs | 4 +- .../client/transaction-pool/tests/pool.rs | 4 +- 12 files changed, 144 insertions(+), 38 deletions(-) create mode 100644 prdoc/pr_7102.prdoc diff --git a/prdoc/pr_7102.prdoc b/prdoc/pr_7102.prdoc new file mode 100644 index 000000000000..b1923aafc3db --- /dev/null +++ b/prdoc/pr_7102.prdoc @@ -0,0 +1,8 @@ +title: '`fatxpool`: rotator cache size now depends on pool''s limits' +doc: +- audience: Node Dev + description: |- + This PR modifies the hard-coded size of the extrinsics cache within `PoolRotator` to be in line with pool limits. It only applies to the fork-aware transaction pool. For the legacy (single-state) transaction pool the logic remains untouched.
+crates: +- name: sc-transaction-pool + bump: minor diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 5e40b0fb72d6..5ba9dd40c156 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -197,14 +197,22 @@ fn benchmark_main(c: &mut Criterion) { c.bench_function("sequential 50 tx", |b| { b.iter(|| { let api = Arc::from(TestApi::new_dependant()); - bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api); + bench_configured( + Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), + 50, + api, + ); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { let api = Arc::from(TestApi::default()); - bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api); + bench_configured( + Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), + 100, + api, + ); }); }); } diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs index b00cf5fbfede..7f2cbe24d8ef 100644 --- a/substrate/client/transaction-pool/src/common/tests.rs +++ b/substrate/client/transaction-pool/src/common/tests.rs @@ -222,5 +222,5 @@ pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { pub(crate) fn pool() -> (Pool, Arc) { let api = Arc::new(TestApi::default()); - (Pool::new(Default::default(), true.into(), api.clone()), api) + (Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api) } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 7679e3b169d2..d69aa37c94a1 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -329,14 +329,14 @@ where let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { if let Some(dropped) = ctx.get_pending_dropped_transaction() { - debug!("dropped_watcher: sending out (pending): {dropped:?}"); + trace!("dropped_watcher: sending out (pending): {dropped:?}"); return Some((dropped, ctx)); } tokio::select! 
{ biased; Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { - debug!("dropped_watcher: sending out: {dropped:?}"); + trace!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } }, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4ec87f1fefa4..e57256943ccf 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -318,7 +318,7 @@ where pool_api.clone(), listener.clone(), metrics.clone(), - TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count), + TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * options.total_count(), options.ready.total_bytes + options.future.total_bytes, )); diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index ff9cc1541af4..4c0ace0b1c73 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -158,6 +158,13 @@ impl Default for Options { } } +impl Options { + /// Total (ready+future) maximal number of transactions in the pool. + pub fn total_count(&self) -> usize { + self.ready.count + self.future.count + } +} + /// Should we check that the transaction is banned /// in the pool, before we verify it? #[derive(Copy, Clone)] @@ -172,6 +179,21 @@ pub struct Pool { } impl Pool { + /// Create a new transaction pool with statically sized rotator. + pub fn new_with_staticly_sized_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + ) -> Self { + Self { + validated_pool: Arc::new(ValidatedPool::new_with_staticly_sized_rotator( + options, + is_validator, + api, + )), + } + } + /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) } @@ -284,6 +306,7 @@ impl Pool { let mut validated_counter: usize = 0; let mut future_tags = Vec::new(); + let now = Instant::now(); for (extrinsic, in_pool_tags) in all { match in_pool_tags { // reuse the tags for extrinsics that were found in the pool @@ -319,7 +342,7 @@ impl Pool { } } - log::trace!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}"); + log::debug!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}, took:{:?}", now.elapsed()); self.prune_tags(at, future_tags, in_pool_hashes).await } @@ -351,6 +374,7 @@ impl Pool { tags: impl IntoIterator, known_imported_hashes: impl IntoIterator> + Clone, ) { + let now = Instant::now(); log::trace!(target: LOG_TARGET, "Pruning at {:?}", at); // Prune all transactions that provide given tags let prune_status = self.validated_pool.prune_tags(tags); @@ -369,9 +393,8 @@ impl Pool { let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; - let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect(); - - log::trace!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}", &at, reverified_transactions.len()); + let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::>(); + log::debug!(target: LOG_TARGET, "Pruning at {:?}. 
Resubmitting transactions: {}, reverification took: {:?}", &at, reverified_transactions.len(), now.elapsed()); log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}"); // And finally - submit reverified transactions back to the pool @@ -580,7 +603,7 @@ mod tests { fn should_reject_unactionable_transactions() { // given let api = Arc::new(TestApi::default()); - let pool = Pool::new( + let pool = Pool::new_with_staticly_sized_rotator( Default::default(), // the node does not author blocks false.into(), @@ -767,7 +790,7 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let hash1 = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); @@ -803,7 +826,7 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); // when block_on( @@ -1036,7 +1059,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let xt = uxt(Transfer { from: Alice.into(), @@ -1074,7 +1097,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) @@ -1106,7 +1129,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let han_of_block0 = api.expect_hash_and_number(0); @@ -1151,7 +1174,11 @@ mod tests { let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); let api = Arc::new(api); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let han_of_block0 = api.expect_hash_and_number(0); diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 9a2e269b5eed..80d8f24144c8 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -31,7 +31,10 @@ use std::{ use super::base_pool::Transaction; /// Expected size of the banned extrinsics cache. -const EXPECTED_SIZE: usize = 2048; +const DEFAULT_EXPECTED_SIZE: usize = 2048; + +/// The default duration, in seconds, for which an extrinsic is banned. +const DEFAULT_BAN_TIME_SECS: u64 = 30 * 60; /// Pool rotator is responsible to only keep fresh extrinsics in the pool. 
/// @@ -42,18 +45,39 @@ pub struct PoolRotator { ban_time: Duration, /// Currently banned extrinsics. banned_until: RwLock>, + /// Expected size of the banned extrinsics cache. + expected_size: usize, +} + +impl Clone for PoolRotator { + fn clone(&self) -> Self { + Self { + ban_time: self.ban_time, + banned_until: RwLock::new(self.banned_until.read().clone()), + expected_size: self.expected_size, + } + } } impl Default for PoolRotator { fn default() -> Self { - Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() } + Self { + ban_time: Duration::from_secs(DEFAULT_BAN_TIME_SECS), + banned_until: Default::default(), + expected_size: DEFAULT_EXPECTED_SIZE, + } } } impl PoolRotator { /// New rotator instance with specified ban time. pub fn new(ban_time: Duration) -> Self { - Self { ban_time, banned_until: Default::default() } + Self { ban_time, ..Self::default() } + } + + /// New rotator instance with specified ban time and expected cache size. + pub fn new_with_expected_size(ban_time: Duration, expected_size: usize) -> Self { + Self { expected_size, ..Self::new(ban_time) } } /// Returns `true` if extrinsic hash is currently banned. @@ -69,8 +93,8 @@ impl PoolRotator { banned.insert(hash, *now + self.ban_time); } - if banned.len() > 2 * EXPECTED_SIZE { - while banned.len() > EXPECTED_SIZE { + if banned.len() > 2 * self.expected_size { + while banned.len() > self.expected_size { if let Some(key) = banned.keys().next().cloned() { banned.remove(&key); } @@ -201,16 +225,16 @@ mod tests { let past_block = 0; // when - for i in 0..2 * EXPECTED_SIZE { + for i in 0..2 * DEFAULT_EXPECTED_SIZE { let tx = tx_with(i as u64, past_block); assert!(rotator.ban_if_stale(&now, past_block, &tx)); } - assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), 2 * DEFAULT_EXPECTED_SIZE); // then - let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); + let tx = tx_with(2 * DEFAULT_EXPECTED_SIZE as u64, past_block); // trigger a garbage collection assert!(rotator.ban_if_stale(&now, past_block, &tx)); - assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), DEFAULT_EXPECTED_SIZE); } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 14df63d9673e..3f7bf4773de7 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -121,16 +121,41 @@ impl Clone for ValidatedPool { listener: Default::default(), pool: RwLock::from(self.pool.read().clone()), import_notification_sinks: Default::default(), - rotator: PoolRotator::default(), + rotator: self.rotator.clone(), } } } impl ValidatedPool { + /// Create a new transaction pool with statically sized rotator. + pub fn new_with_staticly_sized_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + ) -> Self { + let ban_time = options.ban_time; + Self::new_with_rotator(options, is_validator, api, PoolRotator::new(ban_time)) + } + /// Create a new transaction pool. 
pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { - let base_pool = base::BasePool::new(options.reject_future_transactions); let ban_time = options.ban_time; + let total_count = options.total_count(); + Self::new_with_rotator( + options, + is_validator, + api, + PoolRotator::new_with_expected_size(ban_time, total_count), + ) + } + + fn new_with_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + rotator: PoolRotator>, + ) -> Self { + let base_pool = base::BasePool::new(options.reject_future_transactions); Self { is_validator, options, @@ -138,7 +163,7 @@ impl ValidatedPool { api, pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), - rotator: PoolRotator::new(ban_time), + rotator, } } diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index f22fa2ddabde..caa09585b28b 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -384,7 +384,11 @@ mod tests { #[test] fn revalidation_queue_works() { let api = Arc::new(TestApi::default()); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt = uxt(Transfer { @@ -414,7 +418,11 @@ mod tests { #[test] fn revalidation_queue_skips_revalidation_for_unknown_block_hash() { let api = Arc::new(TestApi::default()); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt0 = uxt(Transfer { diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index e7504012ca67..2b32704945c7 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -141,7 +141,11 @@ where finalized_hash: Block::Hash, options: graph::Options, ) -> (Self, Pin + Send>>) { - let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); + let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator( + options, + true.into(), + pool_api.clone(), + )); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( pool_api.clone(), pool.clone(), @@ -177,7 +181,11 @@ where best_block_hash: Block::Hash, finalized_hash: Block::Hash, ) -> Self { - let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); + let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator( + options, + is_validator, + pool_api.clone(), + )); let (revalidation_queue, background_task) = match revalidation_type { RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 8bf08122995c..dd82c52a6047 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ 
-2199,7 +2199,7 @@ fn import_sink_works3() { pool.submit_one(genesis, SOURCE, xt1.clone()), ]; - let x = block_on(futures::future::join_all(submissions)); + block_on(futures::future::join_all(submissions)); let header01a = api.push_block(1, vec![], true); let header01b = api.push_block(1, vec![], true); @@ -2213,8 +2213,6 @@ fn import_sink_works3() { assert_pool_status!(header01a.hash(), &pool, 1, 1); assert_pool_status!(header01b.hash(), &pool, 1, 1); - log::debug!("xxx {x:#?}"); - let import_events = futures::executor::block_on_stream(import_stream).take(1).collect::<Vec<_>>(); diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 20997606c607..de35726435f0 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -49,7 +49,7 @@ const LOG_TARGET: &str = "txpool"; fn pool() -> (Pool<TestApi>, Arc<TestApi>) { let api = Arc::new(TestApi::with_alice_nonce(209)); - (Pool::new(Default::default(), true.into(), api.clone()), api) + (Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api) } fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { @@ -224,7 +224,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { v.provides.push(vec![155]); })); - let pool = Pool::new(Default::default(), true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. Imported"); From cccefdd965c39498825f34e105979c447b315359 Mon Sep 17 00:00:00 2001 From: "polka.dom" Date: Mon, 13 Jan 2025 16:22:32 -0500 Subject: [PATCH 3/4] Remove usage of the pallet::getter macro from pallet-grandpa (#4529) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As per #3326, removes pallet::getter macro usage from pallet-grandpa. The syntax `StorageItem::<T>::get()` should be used instead. cc @muraca
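For downstream code, the migration is mechanical. A hedged before/after sketch (the call sites are hypothetical and `Runtime` stands in for a concrete runtime type; note the pallet also gains plain accessor functions, so the old calls keep compiling):

```rust
// Before: reading GRANDPA state through the `#[pallet::getter(..)]`-generated getters.
let set_id = Grandpa::current_set_id();
let session = Grandpa::session_for_set(set_id);

// After: reading the storage items directly, which is now the preferred form.
let set_id = pallet_grandpa::CurrentSetId::<Runtime>::get();
let session = pallet_grandpa::SetIdSession::<Runtime>::get(set_id);
```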
--------- Co-authored-by: Bastian Köcher --- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/pr_4529.prdoc | 22 ++++ substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/frame/grandpa/src/benchmarking.rs | 4 +- substrate/frame/grandpa/src/equivocation.rs | 2 +- substrate/frame/grandpa/src/lib.rs | 106 +++++++++++++------- substrate/frame/grandpa/src/tests.rs | 89 ++++++++-------- 9 files changed, 144 insertions(+), 87 deletions(-) create mode 100644 prdoc/pr_4529.prdoc diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index cab4394eb5a8..e5d703700fee 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2276,7 +2276,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 82564d5c278c..4f9ba8d8508c 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1186,7 +1186,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 166f3fc42eef..9d77a5e5eea1 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2300,7 +2300,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/prdoc/pr_4529.prdoc b/prdoc/pr_4529.prdoc new file mode 100644 index 000000000000..32beea17ad6b --- /dev/null +++ b/prdoc/pr_4529.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-grandpa + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-grandpa`. + The syntax `StorageItem::<T>::get()` should be used instead. + +crates: + - name: pallet-grandpa + bump: minor + - name: kitchensink-runtime + bump: none + - name: westend-runtime + bump: none + - name: polkadot-test-runtime + bump: none + - name: rococo-runtime + bump: none diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 7de04b27ff83..e11a009c1c3f 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2979,7 +2979,7 @@ impl_runtime_apis! { } fn current_set_id() -> sp_consensus_grandpa::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs index 0a10e5882776..56048efa22ca 100644 --- a/substrate/frame/grandpa/src/benchmarking.rs +++ b/substrate/frame/grandpa/src/benchmarking.rs @@ -17,7 +17,7 @@ //! Benchmarks for the GRANDPA pallet.
-use super::{Pallet as Grandpa, *}; +use super::*; use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_core::H256; @@ -69,7 +69,7 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Root, delay, best_finalized_block_number); - assert!(Grandpa::::stalled().is_some()); + assert!(Stalled::::get().is_some()); } impl_benchmark_test_suite!( diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs index 2366c957e9ab..4ebdbc1eecd3 100644 --- a/substrate/frame/grandpa/src/equivocation.rs +++ b/substrate/frame/grandpa/src/equivocation.rs @@ -177,7 +177,7 @@ where evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), DispatchError> { let (equivocation_proof, key_owner_proof) = evidence; - let reporter = reporter.or_else(|| >::author()); + let reporter = reporter.or_else(|| pallet_authorship::Pallet::::author()); let offender = equivocation_proof.offender().clone(); // We check the equivocation within the context of its set id (and diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 4f69aeaef523..9017eec2ca8f 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -127,7 +127,7 @@ pub mod pallet { impl Hooks> for Pallet { fn on_finalize(block_number: BlockNumberFor) { // check for scheduled pending authority set changes - if let Some(pending_change) = >::get() { + if let Some(pending_change) = PendingChange::::get() { // emit signal if we're at the block that scheduled the change if block_number == pending_change.scheduled_at { let next_authorities = pending_change.next_authorities.to_vec(); @@ -150,12 +150,12 @@ pub mod pallet { Self::deposit_event(Event::NewAuthorities { authority_set: pending_change.next_authorities.into_inner(), }); - >::kill(); + PendingChange::::kill(); } } // check for scheduled pending state changes - match >::get() { + match State::::get() { StoredState::PendingPause { scheduled_at, delay } => { // signal change to pause if block_number == scheduled_at { @@ -164,7 +164,7 @@ pub mod pallet { // enact change to paused state if block_number == scheduled_at + delay { - >::put(StoredState::Paused); + State::::put(StoredState::Paused); Self::deposit_event(Event::Paused); } }, @@ -176,7 +176,7 @@ pub mod pallet { // enact change to live state if block_number == scheduled_at + delay { - >::put(StoredState::Live); + State::::put(StoredState::Live); Self::deposit_event(Event::Resumed); } }, @@ -297,37 +297,32 @@ pub mod pallet { } #[pallet::type_value] - pub(super) fn DefaultForState() -> StoredState> { + pub fn DefaultForState() -> StoredState> { StoredState::Live } /// State of the current authority set. #[pallet::storage] - #[pallet::getter(fn state)] - pub(super) type State = + pub type State = StorageValue<_, StoredState>, ValueQuery, DefaultForState>; /// Pending change: (signaled at, scheduled change). #[pallet::storage] - #[pallet::getter(fn pending_change)] - pub(super) type PendingChange = + pub type PendingChange = StorageValue<_, StoredPendingChange, T::MaxAuthorities>>; /// next block number where we can force a change. #[pallet::storage] - #[pallet::getter(fn next_forced)] - pub(super) type NextForced = StorageValue<_, BlockNumberFor>; + pub type NextForced = StorageValue<_, BlockNumberFor>; /// `true` if we are currently stalled. 
#[pallet::storage] - #[pallet::getter(fn stalled)] - pub(super) type Stalled = StorageValue<_, (BlockNumberFor, BlockNumberFor)>; + pub type Stalled = StorageValue<_, (BlockNumberFor, BlockNumberFor)>; /// The number of changes (both in terms of keys and underlying economic responsibilities) /// in the "set" of Grandpa validators from genesis. #[pallet::storage] - #[pallet::getter(fn current_set_id)] - pub(super) type CurrentSetId = StorageValue<_, SetId, ValueQuery>; + pub type CurrentSetId = StorageValue<_, SetId, ValueQuery>; /// A mapping from grandpa set ID to the index of the *most recent* session for which its /// members were responsible. @@ -340,12 +335,11 @@ pub mod pallet { /// /// TWOX-NOTE: `SetId` is not under user control. #[pallet::storage] - #[pallet::getter(fn session_for_set)] - pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + pub type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; /// The current list of authorities. #[pallet::storage] - pub(crate) type Authorities = + pub type Authorities = StorageValue<_, BoundedAuthorityList, ValueQuery>; #[derive(frame_support::DefaultNoBound)] @@ -432,6 +426,44 @@ pub enum StoredState { } impl Pallet { + /// State of the current authority set. + pub fn state() -> StoredState> { + State::::get() + } + + /// Pending change: (signaled at, scheduled change). + pub fn pending_change() -> Option, T::MaxAuthorities>> { + PendingChange::::get() + } + + /// next block number where we can force a change. + pub fn next_forced() -> Option> { + NextForced::::get() + } + + /// `true` if we are currently stalled. + pub fn stalled() -> Option<(BlockNumberFor, BlockNumberFor)> { + Stalled::::get() + } + + /// The number of changes (both in terms of keys and underlying economic responsibilities) + /// in the "set" of Grandpa validators from genesis. + pub fn current_set_id() -> SetId { + CurrentSetId::::get() + } + + /// A mapping from grandpa set ID to the index of the *most recent* session for which its + /// members were responsible. + /// + /// This is only used for validating equivocation proofs. An equivocation proof must + /// contains a key-ownership proof for a given session, therefore we need a way to tie + /// together sessions and GRANDPA set ids, i.e. we need to validate that a validator + /// was the owner of a given key on a given session, and what the active set ID was + /// during that session. + pub fn session_for_set(set_id: SetId) -> Option { + SetIdSession::::get(set_id) + } + /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { Authorities::::get().into_inner() @@ -440,9 +472,9 @@ impl Pallet { /// Schedule GRANDPA to pause starting in the given number of blocks. /// Cannot be done when already paused. pub fn schedule_pause(in_blocks: BlockNumberFor) -> DispatchResult { - if let StoredState::Live = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); + if let StoredState::Live = State::::get() { + let scheduled_at = frame_system::Pallet::::block_number(); + State::::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -452,9 +484,9 @@ impl Pallet { /// Schedule a resume of GRANDPA after pausing. 
pub fn schedule_resume(in_blocks: BlockNumberFor) -> DispatchResult { - if let StoredState::Paused = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); + if let StoredState::Paused = State::::get() { + let scheduled_at = frame_system::Pallet::::block_number(); + State::::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -481,17 +513,17 @@ impl Pallet { in_blocks: BlockNumberFor, forced: Option>, ) -> DispatchResult { - if !>::exists() { - let scheduled_at = >::block_number(); + if !PendingChange::::exists() { + let scheduled_at = frame_system::Pallet::::block_number(); if forced.is_some() { - if Self::next_forced().map_or(false, |next| next > scheduled_at) { + if NextForced::::get().map_or(false, |next| next > scheduled_at) { return Err(Error::::TooSoon.into()) } // only allow the next forced change when twice the window has passed since // this one. - >::put(scheduled_at + in_blocks * 2u32.into()); + NextForced::::put(scheduled_at + in_blocks * 2u32.into()); } let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( @@ -502,7 +534,7 @@ impl Pallet { ), ); - >::put(StoredPendingChange { + PendingChange::::put(StoredPendingChange { delay: in_blocks, scheduled_at, next_authorities, @@ -518,7 +550,7 @@ impl Pallet { /// Deposit one of this module's logs. fn deposit_log(log: ConsensusLog>) { let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); - >::deposit_log(log); + frame_system::Pallet::::deposit_log(log); } // Perform module initialization, abstracted so that it can be called either through genesis @@ -554,7 +586,7 @@ impl Pallet { // when we record old authority sets we could try to figure out _who_ // failed. until then, we can't meaningfully guard against // `next == last` the way that normal session changes do. - >::put((further_wait, median)); + Stalled::::put((further_wait, median)); } } @@ -583,10 +615,10 @@ where // Always issue a change if `session` says that the validators have changed. // Even if their session keys are the same as before, the underlying economic // identities have changed. - let current_set_id = if changed || >::exists() { + let current_set_id = if changed || Stalled::::exists() { let next_authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - let res = if let Some((further_wait, median)) = >::take() { + let res = if let Some((further_wait, median)) = Stalled::::take() { Self::schedule_change(next_authorities, further_wait, Some(median)) } else { Self::schedule_change(next_authorities, Zero::zero(), None) @@ -608,17 +640,17 @@ where // either the session module signalled that the validators have changed // or the set was stalled. but since we didn't successfully schedule // an authority set change we do not increment the set id. - Self::current_set_id() + CurrentSetId::::get() } } else { // nothing's changed, neither economic conditions nor session keys. update the pointer // of the current set. - Self::current_set_id() + CurrentSetId::::get() }; // update the mapping to note that the current set corresponds to the // latest equivalent session (i.e. now). 
- let session_index = >::current_index(); + let session_index = pallet_session::Pallet::::current_index(); SetIdSession::::insert(current_set_id, &session_index); } diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 383f77f00de7..f4720966b179 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -110,7 +110,7 @@ fn cannot_schedule_change_when_one_pending() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { initialize_block(1, Default::default()); Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -120,7 +120,7 @@ fn cannot_schedule_change_when_one_pending() { let header = System::finalize(); initialize_block(2, header.hash()); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -130,7 +130,7 @@ fn cannot_schedule_change_when_one_pending() { let header = System::finalize(); initialize_block(3, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(3); @@ -144,7 +144,7 @@ fn dispatch_forced_change() { initialize_block(1, Default::default()); Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap(); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)), Error::::ChangePending @@ -155,8 +155,8 @@ fn dispatch_forced_change() { for i in 2..7 { initialize_block(i, header.hash()); - assert!(>::get().unwrap().forced.is_some()); - assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(PendingChange::::get().unwrap().forced.is_some()); + assert_eq!(NextForced::::get(), Some(11)); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -174,7 +174,7 @@ fn dispatch_forced_change() { // add a normal change. { initialize_block(7, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_eq!( Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)]) @@ -187,7 +187,7 @@ fn dispatch_forced_change() { // run the normal change. { initialize_block(8, header.hash()); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_eq!( Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)]) @@ -204,9 +204,9 @@ fn dispatch_forced_change() { // time. 
for i in 9..11 { initialize_block(i, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)])); - assert_eq!(Grandpa::next_forced(), Some(11)); + assert_eq!(NextForced::::get(), Some(11)); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)), Error::::TooSoon @@ -217,13 +217,13 @@ fn dispatch_forced_change() { { initialize_block(11, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_ok!(Grandpa::schedule_change( to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0) )); - assert_eq!(Grandpa::next_forced(), Some(21)); + assert_eq!(NextForced::::get(), Some(21)); Grandpa::on_finalize(11); header = System::finalize(); } @@ -239,7 +239,10 @@ fn schedule_pause_only_when_live() { Grandpa::schedule_pause(1).unwrap(); // we've switched to the pending pause state - assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 }); + assert_eq!( + State::::get(), + StoredState::PendingPause { scheduled_at: 1u64, delay: 1 } + ); Grandpa::on_finalize(1); let _ = System::finalize(); @@ -253,7 +256,7 @@ fn schedule_pause_only_when_live() { let _ = System::finalize(); // after finalizing block 2 the set should have switched to paused state - assert_eq!(Grandpa::state(), StoredState::Paused); + assert_eq!(State::::get(), StoredState::Paused); }); } @@ -265,14 +268,14 @@ fn schedule_resume_only_when_paused() { // the set is currently live, resuming it is an error assert_noop!(Grandpa::schedule_resume(1), Error::::ResumeFailed); - assert_eq!(Grandpa::state(), StoredState::Live); + assert_eq!(State::::get(), StoredState::Live); // we schedule a pause to be applied instantly Grandpa::schedule_pause(0).unwrap(); Grandpa::on_finalize(1); let _ = System::finalize(); - assert_eq!(Grandpa::state(), StoredState::Paused); + assert_eq!(State::::get(), StoredState::Paused); // we schedule the set to go back live in 2 blocks initialize_block(2, Default::default()); @@ -289,7 +292,7 @@ fn schedule_resume_only_when_paused() { let _ = System::finalize(); // it should be live at block 4 - assert_eq!(Grandpa::state(), StoredState::Live); + assert_eq!(State::::get(), StoredState::Live); }); } @@ -342,7 +345,7 @@ fn report_equivocation_current_set_works() { let equivocation_key = &authorities[equivocation_authority_index].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof, with two votes in the same round for // different block hashes signed by the same key @@ -424,7 +427,7 @@ fn report_equivocation_old_set_works() { let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof for the old set, let equivocation_proof = generate_equivocation_proof( @@ -487,7 +490,7 @@ fn report_equivocation_invalid_set_id() { let key_owner_proof = Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation for a future set let equivocation_proof = generate_equivocation_proof( @@ -527,7 +530,7 @@ fn report_equivocation_invalid_session() { start_era(2); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof at set id = 2 let 
equivocation_proof = generate_equivocation_proof( @@ -568,7 +571,7 @@ fn report_equivocation_invalid_key_owner_proof() { let equivocation_key = &authorities[equivocation_authority_index].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof for the authority at index 0 let equivocation_proof = generate_equivocation_proof( @@ -611,7 +614,7 @@ fn report_equivocation_invalid_equivocation_proof() { let key_owner_proof = Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( @@ -675,7 +678,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let equivocation_authority_index = 0; let equivocation_key = &authorities[equivocation_authority_index].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); let equivocation_proof = generate_equivocation_proof( set_id, @@ -748,12 +751,12 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { #[test] fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - assert_eq!(Grandpa::current_set_id(), 0); + assert_eq!(CurrentSetId::::get(), 0); // starting a new era should lead to a change in the session // validators and trigger a new set start_era(1); - assert_eq!(Grandpa::current_set_id(), 1); + assert_eq!(CurrentSetId::::get(), 1); // we schedule a change delayed by 2 blocks, this should make it so that // when we try to rotate the session at the beginning of the era we will @@ -761,22 +764,22 @@ fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() { // not increment the set id. Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap(); start_era(2); - assert_eq!(Grandpa::current_set_id(), 1); + assert_eq!(CurrentSetId::::get(), 1); // everything should go back to normal after. start_era(3); - assert_eq!(Grandpa::current_set_id(), 2); + assert_eq!(CurrentSetId::::get(), 2); // session rotation might also fail to schedule a change if it's for a // forced change (i.e. grandpa is stalled) and it is too soon. 
- >::put(1000); - >::put((30, 1)); + NextForced::::put(1000); + Stalled::::put((30, 1)); // NOTE: we cannot go through normal era rotation since having `Stalled` // defined will also trigger a new set (regardless of whether the // session validators changed) Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty()); - assert_eq!(Grandpa::current_set_id(), 2); + assert_eq!(CurrentSetId::::get(), 2); }); } @@ -790,19 +793,19 @@ fn cleans_up_old_set_id_session_mappings() { // we should have a session id mapping for all the set ids from // `max_set_id_session_entries` eras we have observed for i in 1..=max_set_id_session_entries { - assert!(Grandpa::session_for_set(i as u64).is_some()); + assert!(SetIdSession::::get(i as u64).is_some()); } start_era(max_set_id_session_entries * 2); // we should keep tracking the new mappings for new eras for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 { - assert!(Grandpa::session_for_set(i as u64).is_some()); + assert!(SetIdSession::::get(i as u64).is_some()); } // but the old ones should have been pruned by now for i in 1..=max_set_id_session_entries { - assert!(Grandpa::session_for_set(i as u64).is_none()); + assert!(SetIdSession::::get(i as u64).is_none()); } }); } @@ -812,24 +815,24 @@ fn always_schedules_a_change_on_new_session_when_stalled() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { start_era(1); - assert!(Grandpa::pending_change().is_none()); - assert_eq!(Grandpa::current_set_id(), 1); + assert!(PendingChange::::get().is_none()); + assert_eq!(CurrentSetId::::get(), 1); // if the session handler reports no change then we should not schedule // any pending change Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty()); - assert!(Grandpa::pending_change().is_none()); - assert_eq!(Grandpa::current_set_id(), 1); + assert!(PendingChange::::get().is_none()); + assert_eq!(CurrentSetId::::get(), 1); // if grandpa is stalled then we should **always** schedule a forced // change on a new session - >::put((10, 1)); + Stalled::::put((10, 1)); Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty()); - assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::pending_change().unwrap().forced.is_some()); - assert_eq!(Grandpa::current_set_id(), 2); + assert!(PendingChange::::get().is_some()); + assert!(PendingChange::::get().unwrap().forced.is_some()); + assert_eq!(CurrentSetId::::get(), 2); }); } @@ -861,7 +864,7 @@ fn valid_equivocation_reports_dont_pay_fees() { let equivocation_key = &Grandpa::grandpa_authorities()[0].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof. 
let equivocation_proof = generate_equivocation_proof( From ddffa027d7b78af330a2d3d18b7dfdbd00e431f0 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 14 Jan 2025 10:40:50 +0200 Subject: [PATCH 4/4] forbid v1 descriptors with UMP signals (#7127) --- .../node/core/candidate-validation/src/lib.rs | 15 ++-- .../core/candidate-validation/src/tests.rs | 71 +++++++++++++++++-- polkadot/primitives/src/vstaging/mod.rs | 30 ++++++-- prdoc/pr_7127.prdoc | 9 +++ 4 files changed, 104 insertions(+), 21 deletions(-) create mode 100644 prdoc/pr_7127.prdoc diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 25614349486e..2a4643031bf8 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -912,15 +912,10 @@ async fn validate_candidate_exhaustive( // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { - let core_index = candidate_receipt.descriptor.core_index(); - - match (core_index, exec_kind) { + match exec_kind { // Core selectors are optional for V2 descriptors, but we still check the // descriptor core index. - ( - Some(_core_index), - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), - ) => { + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { let Some(claim_queue) = maybe_claim_queue else { let error = "cannot fetch the claim queue from the runtime"; gum::warn!( @@ -937,9 +932,9 @@ async fn validate_candidate_exhaustive( { gum::warn!( target: LOG_TARGET, - ?err, candidate_hash = ?candidate_receipt.hash(), - "Candidate core index is invalid", + "Candidate core index is invalid: {}", + err ); return Ok(ValidationResult::Invalid( InvalidCandidate::InvalidCoreIndex, @@ -947,7 +942,7 @@ async fn validate_candidate_exhaustive( } }, // No checks for approvals and disputes - (_, _) => {}, + _ => {}, } Ok(ValidationResult::Valid( diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 98e34a1cb4c1..795d7c93f8a7 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ vstaging::{ - CandidateDescriptorV2, ClaimQueueOffset, CoreSelector, MutateDescriptorV2, UMPSignal, - UMP_SEPARATOR, + CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, CoreSelector, + MutateDescriptorV2, UMPSignal, UMP_SEPARATOR, }, CandidateDescriptor, CoreIndex, GroupIndex, HeadData, Id as ParaId, OccupiedCoreAssumption, SessionInfo, UpwardMessage, ValidatorId, @@ -851,7 +851,7 @@ fn invalid_session_or_core_index() { )) .unwrap(); - // Validation doesn't fail for approvals, core/session index is not checked. + // Validation doesn't fail for disputes, core/session index is not checked. assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); assert_eq!(outputs.upward_messages, commitments.upward_messages); @@ -911,6 +911,69 @@ fn invalid_session_or_core_index() { assert_eq!(outputs.hrmp_watermark, 0); assert_eq!(used_validation_data, validation_data); }); + + // Test that a v1 candidate that outputs the core selector UMP signal is invalid. 
+ let descriptor_v1 = make_valid_candidate_descriptor( + ParaId::from(1_u32), + dummy_hash(), + dummy_hash(), + pov.hash(), + validation_code.hash(), + validation_result.head_data.hash(), + dummy_hash(), + sp_keyring::Sr25519Keyring::Ferdie, + ); + let descriptor: CandidateDescriptorV2 = descriptor_v1.into(); + + perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov, &validation_code.hash()) + .unwrap(); + assert_eq!(descriptor.version(), CandidateDescriptorVersion::V1); + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + for exec_kind in + [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] + { + let result = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + exec_kind, + &Default::default(), + Some(Default::default()), + )) + .unwrap(); + assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); + } + + // Validation doesn't fail for approvals and disputes, core/session index is not checked. + for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + exec_kind, + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + } } #[test] @@ -1407,7 +1470,7 @@ fn compressed_code_works() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), + Some(Default::default()), )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 271f78efe090..c52f3539c3e5 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -505,6 +505,10 @@ pub enum CommittedCandidateReceiptError { /// Currenly only one such message is allowed. #[cfg_attr(feature = "std", error("Too many UMP signals"))] TooManyUMPSignals, + /// If the parachain runtime started sending core selectors, v1 descriptors are no longer + /// allowed. + #[cfg_attr(feature = "std", error("Version 1 receipt does not support core selectors"))] + CoreSelectorWithV1Decriptor, } macro_rules! impl_getter { @@ -603,15 +607,25 @@ impl CommittedCandidateReceiptV2 { &self, cores_per_para: &TransposedClaimQueue, ) -> Result<(), CommittedCandidateReceiptError> { + let maybe_core_selector = self.commitments.core_selector()?; + match self.descriptor.version() { - // Don't check v1 descriptors. - CandidateDescriptorVersion::V1 => return Ok(()), + CandidateDescriptorVersion::V1 => { + // If the parachain runtime started sending core selectors, v1 descriptors are no + // longer allowed. 
+ if maybe_core_selector.is_some() { + return Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor) + } else { + // Nothing else to check for v1 descriptors. + return Ok(()) + } + }, CandidateDescriptorVersion::V2 => {}, CandidateDescriptorVersion::Unknown => return Err(CommittedCandidateReceiptError::UnknownVersion(self.descriptor.version)), } - let (maybe_core_index_selector, cq_offset) = self.commitments.core_selector()?.map_or_else( + let (maybe_core_index_selector, cq_offset) = maybe_core_selector.map_or_else( || (None, ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)), |(sel, off)| (Some(sel), off), ); @@ -1207,8 +1221,7 @@ mod tests { assert_eq!(new_ccr.hash(), v2_ccr.hash()); } - // Only check descriptor `core_index` field of v2 descriptors. If it is v1, that field - // will be garbage. + // V1 descriptors are forbidden once the parachain runtime started sending UMP signals. #[test] fn test_v1_descriptors_with_ump_signal() { let mut ccr = dummy_old_committed_candidate_receipt(); @@ -1234,9 +1247,12 @@ mod tests { cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into()); cq.insert(CoreIndex(1), vec![v1_ccr.descriptor.para_id()].into()); - assert!(v1_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok()); - assert_eq!(v1_ccr.descriptor.core_index(), None); + + assert_eq!( + v1_ccr.check_core_index(&transpose_claim_queue(cq)), + Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor) + ); } #[test] diff --git a/prdoc/pr_7127.prdoc b/prdoc/pr_7127.prdoc new file mode 100644 index 000000000000..761ddd04dbe1 --- /dev/null +++ b/prdoc/pr_7127.prdoc @@ -0,0 +1,9 @@ +title: 'Forbid v1 descriptors with UMP signals' +doc: +- audience: [Runtime Dev, Node Dev] + description: Adds a check that parachain candidates do not send out UMP signals with v1 descriptors. +crates: +- name: polkadot-node-core-candidate-validation + bump: minor +- name: polkadot-primitives + bump: major