From 0bcebac4fc69f6bef5da7d94615916bbde294967 Mon Sep 17 00:00:00 2001
From: Dastan <88332432+dastansam@users.noreply.github.com>
Date: Thu, 26 Oct 2023 15:52:12 +0600
Subject: [PATCH 01/69] Expose collection attributes from `Inspect` trait (#1914)

# Description

- What does this PR do?

While working with `pallet_nfts` through the `nonfungibles_v2` traits `Inspect` and `Mutate`, I found out that once you have set a collection attribute with `::set_collection_attribute()`, it is not possible to read it back with `::collection_attribute()`, since the two use different `namespace` values: `AttributeNamespace::Pallet` is used when setting the attribute, while `AttributeNamespace::CollectionOwner` is used when reading. More context: https://github.com/freeverseio/laos/issues/7#issuecomment-1766137370

This PR makes `item` an optional parameter in `Inspect::system_attribute()`, so that collection attributes can be read through it as well.

- Why are these changes needed?

To make collection-level attributes readable through the same trait: a collection attribute can now be read by passing `None` for `item`.

- How were these changes implemented and what do they affect?

`NftsApi` is also affected: `NftsApi::system_attribute()` now accepts an optional `item` parameter.

## Breaking change

Because of the change to the `item` param of `NftsApi::system_attribute()`, parachains that integrated the `NftsApi` need to update their API code and frontend integrations accordingly. The AssetHubs are unaffected, since the `NftsApi` wasn't released on those parachains yet.
--- .../assets/asset-hub-westend/src/lib.rs | 4 +- substrate/bin/node/runtime/src/lib.rs | 4 +- substrate/frame/nfts/runtime-api/src/lib.rs | 2 +- substrate/frame/nfts/src/impl_nonfungibles.rs | 8 +- substrate/frame/nfts/src/tests.rs | 82 ++++++++++++++++++- .../src/traits/tokens/nonfungible_v2.rs | 4 +- .../src/traits/tokens/nonfungibles_v2.rs | 13 +-- 7 files changed, 101 insertions(+), 16 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index c090536b3da6..30d384222422 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1081,10 +1081,10 @@ impl_runtime_apis! { fn system_attribute( collection: u32, - item: u32, + item: Option, key: Vec, ) -> Option> { - >::system_attribute(&collection, &item, &key) + >::system_attribute(&collection, item.as_ref(), &key) } fn collection_attribute(collection: u32, key: Vec) -> Option> { diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index f4c8a5940a3c..a2d100e1f8b5 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2630,10 +2630,10 @@ impl_runtime_apis! { fn system_attribute( collection: u32, - item: u32, + item: Option, key: Vec, ) -> Option> { - >::system_attribute(&collection, &item, &key) + >::system_attribute(&collection, item.as_ref(), &key) } fn collection_attribute(collection: u32, key: Vec) -> Option> { diff --git a/substrate/frame/nfts/runtime-api/src/lib.rs b/substrate/frame/nfts/runtime-api/src/lib.rs index cf2d444b42f8..77535c64069c 100644 --- a/substrate/frame/nfts/runtime-api/src/lib.rs +++ b/substrate/frame/nfts/runtime-api/src/lib.rs @@ -48,7 +48,7 @@ sp_api::decl_runtime_apis!
{ fn system_attribute( collection: CollectionId, - item: ItemId, + item: Option, key: Vec, ) -> Option>; diff --git a/substrate/frame/nfts/src/impl_nonfungibles.rs b/substrate/frame/nfts/src/impl_nonfungibles.rs index 4a6b70eb9973..ee7f42cfc689 100644 --- a/substrate/frame/nfts/src/impl_nonfungibles.rs +++ b/substrate/frame/nfts/src/impl_nonfungibles.rs @@ -79,17 +79,19 @@ impl, I: 'static> Inspect<::AccountId> for Palle Attribute::::get((collection, Some(item), namespace, key)).map(|a| a.0.into()) } - /// Returns the system attribute value of `item` of `collection` corresponding to `key`. + /// Returns the system attribute value of `item` of `collection` corresponding to `key` if + /// `item` is `Some`. Otherwise, returns the system attribute value of `collection` + /// corresponding to `key`. /// /// By default this is `None`; no attributes are defined. fn system_attribute( collection: &Self::CollectionId, - item: &Self::ItemId, + item: Option<&Self::ItemId>, key: &[u8], ) -> Option> { let namespace = AttributeNamespace::Pallet; let key = BoundedSlice::<_, _>::try_from(key).ok()?; - Attribute::::get((collection, Some(item), namespace, key)).map(|a| a.0.into()) + Attribute::::get((collection, item, namespace, key)).map(|a| a.0.into()) } /// Returns the attribute value of `item` of `collection` corresponding to `key`. diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs index a82fcca01512..aeebf51b7c78 100644 --- a/substrate/frame/nfts/src/tests.rs +++ b/substrate/frame/nfts/src/tests.rs @@ -22,7 +22,7 @@ use enumflags2::BitFlags; use frame_support::{ assert_noop, assert_ok, traits::{ - tokens::nonfungibles_v2::{Create, Destroy, Mutate}, + tokens::nonfungibles_v2::{Create, Destroy, Inspect, Mutate}, Currency, Get, }, }; @@ -982,6 +982,86 @@ fn set_collection_owner_attributes_should_work() { }); } +#[test] +fn set_collection_system_attributes_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&account(1), 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + account(1), + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(account(1)), 0, 0, account(1), None)); + + let collection_id = 0; + let attribute_key = [0u8]; + let attribute_value = [0u8]; + + assert_ok!(, ItemConfig>>::set_collection_attribute( + &collection_id, + &attribute_key, + &attribute_value + )); + + assert_eq!(attributes(0), vec![(None, AttributeNamespace::Pallet, bvec![0], bvec![0])]); + + assert_eq!( + >>::system_attribute( + &collection_id, + None, + &attribute_key + ), + Some(attribute_value.to_vec()) + ); + + // test typed system attribute + let typed_attribute_key = [0u8; 32]; + #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] + struct TypedAttributeValue(u32); + let typed_attribute_value = TypedAttributeValue(42); + + assert_ok!( + , ItemConfig>>::set_typed_collection_attribute( + &collection_id, + &typed_attribute_key, + &typed_attribute_value + ) + ); + + assert_eq!( + >>::typed_system_attribute( + &collection_id, + None, + &typed_attribute_key + ), + Some(typed_attribute_value) + ); + + // check storage + assert_eq!( + attributes(collection_id), + [ + (None, AttributeNamespace::Pallet, bvec![0], bvec![0]), + ( + None, + AttributeNamespace::Pallet, + bvec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + ], + bvec![42, 0, 0, 0] + ) + ] + ); + + assert_ok!(Nfts::burn(RuntimeOrigin::root(), collection_id, 0)); + let w = 
Nfts::get_destroy_witness(&0).unwrap(); + assert_ok!(Nfts::destroy(RuntimeOrigin::signed(account(1)), collection_id, w)); + assert_eq!(attributes(collection_id), vec![]); + }) +} + #[test] fn set_item_owner_attributes_should_work() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs b/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs index 788a4d25e810..05f76e2859d2 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -226,7 +226,7 @@ impl< >::custom_attribute(account, &A::get(), item, key) } fn system_attribute(item: &Self::ItemId, key: &[u8]) -> Option> { - >::system_attribute(&A::get(), item, key) + >::system_attribute(&A::get(), Some(item), key) } fn typed_attribute(item: &Self::ItemId, key: &K) -> Option { >::typed_attribute(&A::get(), item, key) @@ -244,7 +244,7 @@ impl< ) } fn typed_system_attribute(item: &Self::ItemId, key: &K) -> Option { - >::typed_system_attribute(&A::get(), item, key) + >::typed_system_attribute(&A::get(), Some(item), key) } fn can_transfer(item: &Self::ItemId) -> bool { >::can_transfer(&A::get(), item) diff --git a/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs b/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs index 868afbdf7eee..c0209b6d5123 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -75,12 +75,14 @@ pub trait Inspect { None } - /// Returns the system attribute value of `item` of `collection` corresponding to `key`. + /// Returns the system attribute value of `item` of `collection` corresponding to `key` if + /// `item` is `Some`. Otherwise, returns the system attribute value of `collection` + /// corresponding to `key`. /// /// By default this is `None`; no attributes are defined. fn system_attribute( _collection: &Self::CollectionId, - _item: &Self::ItemId, + _item: Option<&Self::ItemId>, _key: &[u8], ) -> Option> { None @@ -113,13 +115,14 @@ pub trait Inspect { .and_then(|v| V::decode(&mut &v[..]).ok()) } - /// Returns the strongly-typed system attribute value of `item` of `collection` corresponding to - /// `key`. + /// Returns the strongly-typed system attribute value of `item` corresponding to `key` if + /// `item` is `Some`. Otherwise, returns the strongly-typed system attribute value of + /// `collection` corresponding to `key`. /// /// By default this just attempts to use `system_attribute`. 
fn typed_system_attribute( collection: &Self::CollectionId, - item: &Self::ItemId, + item: Option<&Self::ItemId>, key: &K, ) -> Option { key.using_encoded(|d| Self::system_attribute(collection, item, d)) From 21d36b7b4229c4d5225944f197918cde23fda4ea Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 26 Oct 2023 15:36:05 +0200 Subject: [PATCH 02/69] Removed TODO from test-case for hard-coded delivery fee estimation (#2042) Co-authored-by: Adrian Catangiu --- .../assets/asset-hub-rococo/tests/tests.rs | 38 +-- .../runtimes/assets/test-utils/src/lib.rs | 44 ++++ .../test-utils/src/test_cases_over_bridge.rs | 222 ++++++++++-------- 3 files changed, 164 insertions(+), 140 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index a763382f9054..b93315cc39d8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -683,6 +683,7 @@ mod asset_hub_rococo_tests { bridging_to_asset_hub_wococo, WeightLimit::Unlimited, Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), + Some(xcm_config::TreasuryAccount::get().unwrap()), ) } @@ -717,29 +718,11 @@ mod asset_hub_rococo_tests { Runtime, AllPalletsWithoutSystem, XcmConfig, - ParachainSystem, - XcmpQueue, LocationToAccountId, ToWococoXcmRouterInstance, >( collator_session_keys(), - ExistentialDeposit::get(), - AccountId::from(ALICE), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bridging_to_asset_hub_wococo, - WeightLimit::Unlimited, - Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), || { sp_std::vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, @@ -888,6 +871,7 @@ mod asset_hub_wococo_tests { with_wococo_flavor_bridging_to_asset_hub_rococo, WeightLimit::Unlimited, Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), + Some(xcm_config::TreasuryAccount::get().unwrap()), ) } @@ -922,29 +906,11 @@ mod asset_hub_wococo_tests { Runtime, AllPalletsWithoutSystem, XcmConfig, - ParachainSystem, - XcmpQueue, LocationToAccountId, ToRococoXcmRouterInstance, >( collator_session_keys(), - ExistentialDeposit::get(), - AccountId::from(ALICE), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), with_wococo_flavor_bridging_to_asset_hub_rococo, - WeightLimit::Unlimited, - Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), || { sp_std::vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs index e0f05fa7b0a4..471b1f09b567 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs @@ -19,4 +19,48 @@ pub mod test_cases; pub mod test_cases_over_bridge; pub 
mod xcm_helpers; + +use frame_support::traits::ProcessMessageError; pub use parachains_runtimes_test_utils::*; +use std::fmt::Debug; + +use xcm::latest::prelude::*; +use xcm_builder::{CreateMatcher, MatchXcm}; + +/// Helper function to verify `xcm` contains all relevant instructions expected on destination +/// chain as part of a reserve-asset-transfer. +pub(crate) fn assert_matches_reserve_asset_deposited_instructions( + xcm: &mut Xcm, + expected_reserve_assets_deposited: &MultiAssets, + expected_beneficiary: &MultiLocation, +) { + let _ = xcm + .0 + .matcher() + .skip_inst_while(|inst| !matches!(inst, ReserveAssetDeposited(..))) + .expect("no instruction ReserveAssetDeposited?") + .match_next_inst(|instr| match instr { + ReserveAssetDeposited(reserve_assets) => { + assert_eq!(reserve_assets, expected_reserve_assets_deposited); + Ok(()) + }, + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction ReserveAssetDeposited") + .match_next_inst(|instr| match instr { + ClearOrigin => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction ClearOrigin") + .match_next_inst(|instr| match instr { + BuyExecution { .. } => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction BuyExecution") + .match_next_inst(|instr| match instr { + DepositAsset { assets: _, beneficiary } if beneficiary == expected_beneficiary => + Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction DepositAsset"); +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 8cc5f81f4972..6c8ac8c6452b 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -16,13 +16,12 @@ //! Module contains predefined test-case scenarios for `Runtime` with various assets transferred //! over a bridge. 
+use crate::assert_matches_reserve_asset_deposited_instructions; use codec::Encode; use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ assert_ok, - traits::{ - fungible::Mutate, Currency, OnFinalize, OnInitialize, OriginTrait, ProcessMessageError, - }, + traits::{Currency, Get, OnFinalize, OnInitialize, OriginTrait, ProcessMessageError}, }; use frame_system::pallet_prelude::BlockNumberFor; use parachains_common::{AccountId, Balance}; @@ -30,10 +29,13 @@ use parachains_runtimes_test_utils::{ mock_open_hrmp_channel, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder, RuntimeHelper, ValidatorIdOf, XcmReceivedFrom, }; -use sp_runtime::traits::StaticLookup; +use sp_runtime::{traits::StaticLookup, Saturating}; use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; -use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +use xcm_executor::{ + traits::{ConvertLocation, TransactAsset}, + XcmExecutor, +}; pub struct TestBridgingConfig { pub bridged_network: NetworkId, @@ -61,6 +63,7 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< prepare_configuration: fn() -> TestBridgingConfig, weight_limit: WeightLimit, maybe_paid_export_message: Option, + delivery_fees_account: Option>, ) where Runtime: frame_system::Config + pallet_balances::Config @@ -151,6 +154,11 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< existential_deposit ); + let delivery_fees_account_balance_before = delivery_fees_account + .as_ref() + .map(|dfa| >::free_balance(dfa)) + .unwrap_or(0.into()); + // local native asset (pallet_balances) let asset_to_transfer = MultiAsset { fun: Fungible(balance_to_transfer.into()), @@ -166,22 +174,76 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< }), }; - // Make sure sender has enough funds for paying delivery fees - // TODO: Get this fee via weighing the corresponding message - let delivery_fees = 1324039894; - >::mint_into(&alice_account, delivery_fees.into()) + let assets_to_transfer = MultiAssets::from(asset_to_transfer); + let mut expected_assets = assets_to_transfer.clone(); + let context = XcmConfig::UniversalLocation::get(); + expected_assets + .reanchor(&target_location_from_different_consensus, context) .unwrap(); + let expected_beneficiary = target_destination_account; + + // Make sure sender has enough funds for paying delivery fees + let handling_delivery_fees = { + // Probable XCM with `ReserveAssetDeposited`. + let mut expected_reserve_asset_deposited_message = Xcm(vec![ + ReserveAssetDeposited(MultiAssets::from(expected_assets.clone())), + ClearOrigin, + BuyExecution { + fees: MultiAsset { + id: Concrete(Default::default()), + fun: Fungible(balance_to_transfer), + }, + weight_limit: Unlimited, + }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, + SetTopic([ + 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, + 140, 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, + ]), + ]); + assert_matches_reserve_asset_deposited_instructions( + &mut expected_reserve_asset_deposited_message, + &expected_assets, + &expected_beneficiary, + ); + + // Call `SendXcm::validate` to get delivery fees. + let (_, delivery_fees): (_, MultiAssets) = XcmConfig::XcmSender::validate( + &mut Some(target_location_from_different_consensus), + &mut Some(expected_reserve_asset_deposited_message), + ) + .expect("validate passes"); + // Drip delivery fee to Alice account. 
+ let mut delivery_fees_added = false; + for delivery_fee in delivery_fees.inner() { + assert_ok!(::deposit_asset( + &delivery_fee, + &MultiLocation { + parents: 0, + interior: X1(AccountId32 { + network: None, + id: alice_account.clone().into(), + }), + }, + None, + )); + delivery_fees_added = true; + } + delivery_fees_added + }; + // do pallet_xcm call reserve transfer assert_ok!(>::limited_reserve_transfer_assets( RuntimeHelper::::origin_of(alice_account.clone()), Box::new(target_location_from_different_consensus.into_versioned()), Box::new(target_destination_account.into_versioned()), - Box::new(VersionedMultiAssets::from(MultiAssets::from(asset_to_transfer))), + Box::new(VersionedMultiAssets::from(assets_to_transfer)), 0, weight_limit, )); + // check events // check pallet_xcm attempted RuntimeHelper::::assert_pallet_xcm_event_outcome( &unwrap_pallet_xcm_event, @@ -190,20 +252,6 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< }, ); - // check alice account decreased by balance_to_transfer - // TODO:check-parameter: change and assert in tests when (https://github.com/paritytech/polkadot-sdk/pull/1234) merged - assert_eq!( - >::free_balance(&alice_account), - alice_account_init_balance - balance_to_transfer.into() - ); - - // check reserve account - // check reserve account increased by balance_to_transfer - assert_eq!( - >::free_balance(&reserve_account), - existential_deposit + balance_to_transfer.into() - ); - // check that xcm was sent let xcm_sent_message_hash = >::events() .into_iter() @@ -219,7 +267,6 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< local_bridge_hub_para_id.into(), ) .unwrap(); - assert_eq!( xcm_sent_message_hash, Some(xcm_sent.using_encoded(sp_io::hashing::blake2_256)) @@ -268,12 +315,41 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< .split_global() .expect("split works"); assert_eq!(destination, &target_location_junctions_without_global_consensus); - assert_matches_pallet_xcm_reserve_transfer_assets_instructions(inner_xcm); + assert_matches_reserve_asset_deposited_instructions( + inner_xcm, + &expected_assets, + &expected_beneficiary, + ); Ok(()) }, _ => Err(ProcessMessageError::BadFormat), }) .expect("contains ExportMessage"); + + // check alice account decreased by balance_to_transfer + assert_eq!( + >::free_balance(&alice_account), + alice_account_init_balance + .saturating_sub(existential_deposit) + .saturating_sub(balance_to_transfer.into()) + ); + + // check reserve account increased by balance_to_transfer + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + balance_to_transfer.into() + ); + + // check dedicated account increased by delivery fees (if configured) + if handling_delivery_fees { + if let Some(delivery_fees_account) = delivery_fees_account { + let delivery_fees_account_balance_after = + >::free_balance(&delivery_fees_account); + assert!( + delivery_fees_account_balance_after > delivery_fees_account_balance_before + ); + } + } }) } @@ -405,15 +481,21 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< 0.into() ); + let expected_assets = MultiAssets::from(vec![MultiAsset { + id: Concrete(foreign_asset_id_multilocation), + fun: Fungible(transfered_foreign_asset_id_amount), + }]); + let expected_beneficiary = MultiLocation { + parents: 0, + interior: X1(AccountId32 { network: None, id: target_account.clone().into() }), + }; + // Call received XCM execution let xcm = Xcm(vec![ DescendOrigin(bridge_instance), UniversalOrigin(universal_origin), 
DescendOrigin(descend_origin), - ReserveAssetDeposited(MultiAssets::from(vec![MultiAsset { - id: Concrete(foreign_asset_id_multilocation), - fun: Fungible(transfered_foreign_asset_id_amount), - }])), + ReserveAssetDeposited(expected_assets.clone()), ClearOrigin, BuyExecution { fees: MultiAsset { @@ -422,22 +504,17 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< }, weight_limit: Unlimited, }, - DepositAsset { - assets: Wild(AllCounted(1)), - beneficiary: MultiLocation { - parents: 0, - interior: X1(AccountId32 { - network: None, - id: target_account.clone().into(), - }), - }, - }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, SetTopic([ 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, 140, 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, ]), ]); - assert_matches_pallet_xcm_reserve_transfer_assets_instructions(&mut xcm.clone()); + assert_matches_reserve_asset_deposited_instructions( + &mut xcm.clone(), + &expected_assets, + &expected_beneficiary, + ); let hash = xcm.using_encoded(sp_io::hashing::blake2_256); @@ -498,55 +575,15 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< }) } -fn assert_matches_pallet_xcm_reserve_transfer_assets_instructions( - xcm: &mut Xcm, -) { - let _ = xcm - .0 - .matcher() - .skip_inst_while(|inst| !matches!(inst, ReserveAssetDeposited(..))) - .expect("no instruction ReserveAssetDeposited?") - .match_next_inst(|instr| match instr { - ReserveAssetDeposited(..) => Ok(()), - _ => Err(ProcessMessageError::BadFormat), - }) - .expect("expected instruction ReserveAssetDeposited") - .match_next_inst(|instr| match instr { - ClearOrigin => Ok(()), - _ => Err(ProcessMessageError::BadFormat), - }) - .expect("expected instruction ClearOrigin") - .match_next_inst(|instr| match instr { - BuyExecution { .. } => Ok(()), - _ => Err(ProcessMessageError::BadFormat), - }) - .expect("expected instruction BuyExecution") - .match_next_inst(|instr| match instr { - DepositAsset { .. 
} => Ok(()), - _ => Err(ProcessMessageError::BadFormat), - }) - .expect("expected instruction DepositAsset"); -} - pub fn report_bridge_status_from_xcm_bridge_router_works< Runtime, AllPalletsWithoutSystem, XcmConfig, - HrmpChannelOpener, - HrmpChannelSource, LocationToAccountId, XcmBridgeHubRouterInstance, >( collator_session_keys: CollatorSessionKeys, - existential_deposit: BalanceOf, - alice_account: AccountIdOf, - unwrap_pallet_xcm_event: Box) -> Option>>, - unwrap_xcmp_queue_event: Box< - dyn Fn(Vec) -> Option>, - >, prepare_configuration: fn() -> TestBridgingConfig, - weight_limit: WeightLimit, - maybe_paid_export_message: Option, congested_message: fn() -> Xcm, uncongested_message: fn() -> Xcm, ) where @@ -572,10 +609,6 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< <::Lookup as StaticLookup>::Source: From<::AccountId>, ::AccountId: From, - HrmpChannelOpener: frame_support::inherent::ProvideInherent< - Call = cumulus_pallet_parachain_system::Call, - >, - HrmpChannelSource: XcmpMessageSource, XcmBridgeHubRouterInstance: 'static, { ExtBuilder::::default() @@ -584,25 +617,6 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< .with_tracing() .build() .execute_with(|| { - // check transfer works - limited_reserve_transfer_assets_for_native_asset_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - HrmpChannelOpener, - HrmpChannelSource, - LocationToAccountId, - >( - collator_session_keys, - existential_deposit, - alice_account, - unwrap_pallet_xcm_event, - unwrap_xcmp_queue_event, - prepare_configuration, - weight_limit, - maybe_paid_export_message, - ); - let report_bridge_status = |is_congested: bool| { // prepare bridge config let TestBridgingConfig { local_bridge_hub_location, .. } = prepare_configuration(); From 1b08bdd2dd3f1c2fe623c6cc7df45714e6256362 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 26 Oct 2023 22:00:51 +0300 Subject: [PATCH 03/69] cumulus: fix test runtimes panic (#2039) the min slot duration should be 0 only if the `experimental` feature is enabled. otherwise, the runtime will panic on a division by 0. --- .../parachains/runtimes/glutton/glutton-kusama/src/lib.rs | 5 ++++- cumulus/parachains/runtimes/starters/seedling/src/lib.rs | 3 +++ cumulus/parachains/runtimes/starters/shell/src/lib.rs | 3 +++ substrate/frame/aura/src/lib.rs | 5 +++-- substrate/frame/babe/src/lib.rs | 5 +++-- 5 files changed, 16 insertions(+), 5 deletions(-) diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index f5d52239e543..7f4f88fc0551 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -224,7 +224,10 @@ impl cumulus_pallet_aura_ext::Config for Runtime {} impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; type WeightInfo = weights::pallet_timestamp::WeightInfo; } @@ -355,7 +358,7 @@ impl_runtime_apis! 
{ impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec { diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index c2bcaf8a1266..9efbf5631235 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -216,7 +216,10 @@ impl pallet_aura::Config for Runtime { impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ parachains_common::SLOT_DURATION / 2 }>; type WeightInfo = (); } diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 4aad553e6a3b..18c1fcbb4baa 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -219,7 +219,10 @@ impl pallet_aura::Config for Runtime { impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ parachains_common::SLOT_DURATION / 2 }>; type WeightInfo = (); } diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index b314a3601e15..3017120e4e6d 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -408,8 +408,9 @@ impl OnTimestampSet for Pallet { let timestamp_slot = moment / slot_duration; let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - assert!( - CurrentSlot::::get() == timestamp_slot, + assert_eq!( + CurrentSlot::::get(), + timestamp_slot, "Timestamp slot must match `CurrentSlot`" ); } diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 4b99cd517968..57e1dbb6b53c 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -900,8 +900,9 @@ impl OnTimestampSet for Pallet { let timestamp_slot = moment / slot_duration; let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - assert!( - CurrentSlot::::get() == timestamp_slot, + assert_eq!( + CurrentSlot::::get(), + timestamp_slot, "Timestamp slot must match `CurrentSlot`" ); } From 42707bc98b4d0921087c8abbff7ca115cf7e4a21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 26 Oct 2023 21:53:15 +0200 Subject: [PATCH 04/69] sp-version: Improve the docs (#2027) Co-authored-by: ordian --- substrate/primitives/version/src/lib.rs | 47 +++++++++++++++++-------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index bd8408bb4a48..13f4520f6e69 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -175,17 +175,24 @@ pub struct RuntimeVersion { /// will not attempt to author blocks unless this is equal to its native runtime. pub authoring_version: u32, - /// Version of the runtime specification. 
A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. + /// Version of the runtime specification. + /// + /// A full-node will not attempt to use its native runtime in substitute for the on-chain + /// Wasm runtime unless all of `spec_name`, `spec_version` and `authoring_version` are the same + /// between Wasm and native. + /// + /// This number should never decrease. pub spec_version: u32, - /// Version of the implementation of the specification. Nodes are free to ignore this; it - /// serves only as an indication that the code is different; as long as the other two versions - /// are the same then while the actual code may be different, it is nonetheless required to - /// do the same thing. - /// Non-consensus-breaking optimizations are about the only changes that could be made which - /// would result in only the `impl_version` changing. + /// Version of the implementation of the specification. + /// + /// Nodes are free to ignore this; it serves only as an indication that the code is different; + /// as long as the other two versions are the same then while the actual code may be different, + /// it is nonetheless required to do the same thing. Non-consensus-breaking optimizations are + /// about the only changes that could be made which would result in only the `impl_version` + /// changing. + /// + /// This number can be reverted to `0` after a [`spec_version`](Self::spec_version) bump. pub impl_version: u32, /// List of supported API "features" along with their versions. @@ -198,15 +205,25 @@ pub struct RuntimeVersion { )] pub apis: ApisVec, - /// All existing dispatches are fully compatible when this number doesn't change. If this - /// number changes, then `spec_version` must change, also. + /// All existing calls (dispatchables) are fully compatible when this number doesn't change. If + /// this number changes, then [`spec_version`](Self::spec_version) must change, also. /// - /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// This number must change when an existing call (pallet index, call index) is changed, /// either through an alteration in its user-level semantics, a parameter - /// added/removed/changed, a dispatchable being removed, a module being removed, or a - /// dispatchable/module changing its index. + /// added/removed, a parameter type changed, or a call/pallet changing its index. An alteration + /// of the user level semantics is for example when the call was before `transfer` and now is + /// `transfer_all`, the semantics of the call changed completely. + /// + /// Removing a pallet or a call doesn't require a *bump* as long as no pallet or call is put at + /// the same index. Removing doesn't require a bump as the chain will reject a transaction + /// referencing this removed call/pallet while decoding and thus, the user isn't at risk to + /// execute any unknown call. FRAME runtime devs have control over the index of a call/pallet + /// to prevent that an index gets reused. + /// + /// Adding a new pallet or call also doesn't require a *bump* as long as they also don't reuse + /// any previously used index. /// - /// It need *not* change when a new module is added or when a dispatchable is added. + /// This number should never decrease. pub transaction_version: u32, /// Version of the state implementation used by this runtime. 
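For orientation, the fields documented above are declared together in every runtime's `VERSION` constant. A minimal sketch of such a declaration, with purely illustrative values that are not taken from this patch:

```rust
use sp_runtime::create_runtime_str;
use sp_version::{runtime_version, RuntimeVersion};

// Illustrative sketch only. `RUNTIME_API_VERSIONS` would be generated by
// `sp_api::impl_runtime_apis!` in a real runtime.
#[runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("example-runtime"),
	impl_name: create_runtime_str!("example-runtime"),
	authoring_version: 1,
	// Should never decrease; bump on any consensus-relevant logic change.
	spec_version: 100,
	// Node-side detail only; can be reset to 0 after a `spec_version` bump.
	impl_version: 1,
	apis: RUNTIME_API_VERSIONS,
	// Bump whenever an existing call's index, parameters or semantics change.
	transaction_version: 1,
	state_version: 1,
};
```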
From 86228fa45e4c64642f7210cf44c40cc84ae17537 Mon Sep 17 00:00:00 2001 From: yjh Date: Fri, 27 Oct 2023 03:55:49 +0800 Subject: [PATCH 05/69] add `authorities_len` for aura (#2040) --- substrate/frame/aura/src/lib.rs | 7 ++++++- substrate/frame/aura/src/tests.rs | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index 3017120e4e6d..f7506db05d1d 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -230,6 +230,11 @@ impl Pallet { } } + /// Return current authorities length. + pub fn authorities_len() -> usize { + Authorities::::decode_len().unwrap_or(0) + } + /// Get the current slot from the pre-runtime digests. fn current_slot_from_digests() -> Option { let digest = frame_system::Pallet::::digest(); @@ -363,7 +368,7 @@ impl FindAuthor for Pallet { for (id, mut data) in digests.into_iter() { if id == AURA_ENGINE_ID { let slot = Slot::decode(&mut data).ok()?; - let author_index = *slot % Self::authorities().len() as u64; + let author_index = *slot % Self::authorities_len() as u64; return Some(author_index as u32) } } diff --git a/substrate/frame/aura/src/tests.rs b/substrate/frame/aura/src/tests.rs index d3ce877d3e60..b3a5e144fad8 100644 --- a/substrate/frame/aura/src/tests.rs +++ b/substrate/frame/aura/src/tests.rs @@ -29,7 +29,8 @@ use sp_runtime::{Digest, DigestItem}; fn initial_values() { build_ext_and_execute_test(vec![0, 1, 2, 3], || { assert_eq!(Aura::current_slot(), 0u64); - assert_eq!(Aura::authorities().len(), 4); + assert_eq!(Aura::authorities().len(), Aura::authorities_len()); + assert_eq!(Aura::authorities_len(), 4); }); } From 6ca5789db85bd22b1ab18b6bfede6b56c305dd53 Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Fri, 27 Oct 2023 04:54:56 -0400 Subject: [PATCH 06/69] upgrade docify to 0.2.5 (#2052) Updates `docify` to 0.2.5, which fixes some indentation bugs and adds the new `#[docify::export_content]` attribute which can be used like regular `#[docify::export]` but will only export the _underlying contents_ of the item it is attached to, if applicable (otherwise it just behaves exactly like `#[docify::export]`). 
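For illustration, a rough sketch of the difference between the two attributes (the item names and file path below are made up, not taken from the release):

```rust
// Hypothetical snippet, for illustration only.
#[docify::export]
fn full_item() {
	// `export` embeds this item verbatim, signature and braces included.
	let _sum = 1 + 1;
}

#[docify::export_content]
fn body_only() {
	// `export_content` embeds only what is between the braces, i.e. just
	// these statements, without the `fn body_only() { .. }` wrapper.
	let _sum = 1 + 1;
}
```

Either way, the exported snippet is then pulled into rustdoc with something like `#[doc = docify::embed!("src/lib.rs", body_only)]`.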
Release notes here: https://github.com/sam0x17/docify/releases/tag/v0.2.5 cc @kianenigma --- Cargo.lock | 8 ++++---- substrate/frame/bags-list/Cargo.toml | 2 +- substrate/frame/fast-unstake/Cargo.toml | 2 +- substrate/frame/paged-list/Cargo.toml | 2 +- substrate/frame/scheduler/Cargo.toml | 2 +- substrate/frame/sudo/Cargo.toml | 2 +- substrate/frame/support/Cargo.toml | 2 +- substrate/frame/timestamp/Cargo.toml | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7208b77e1098..0facc7e18cf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4538,18 +4538,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ee528c501ddd15d5181997e9518e59024844eac44fd1e40cb20ddb2a8562fa" +checksum = "80bf86c286159ed2d70e9ff5c4de69b793ab8632c8a1d276d44bbff36f052f64" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca01728ab2679c464242eca99f94e2ce0514b52ac9ad950e2ed03fca991231c" +checksum = "2b5ac3bdcdc56f2317e51884a90bd6f595febd6d029cdb75174162107072a8a3" dependencies = [ "common-path", "derive-syn-parse", diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 5f8f31c192b2..f6b8335b311d 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau # third party log = { version = "0.4.17", default-features = false } -docify = "0.2.4" +docify = "0.2.5" aquamarine = { version = "0.3.2" } # Optional imports for benchmarking diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 832369e5b58f..ad502f03d187 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -docify = "0.2.4" +docify = "0.2.5" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index f0e439081e97..194201b715c0 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } -docify = "0.2.4" +docify = "0.2.5" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index d307cc878542..6f8f59738853 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -20,7 +20,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} sp-weights = { path = "../../primitives/weights", default-features = false} -docify = "0.2.4" +docify = 
"0.2.5" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 25f10448d923..5663dc0dea8c 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -22,7 +22,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} -docify = "0.2.4" +docify = "0.2.5" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 5caf993bb35a..e6edaf22f108 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -43,7 +43,7 @@ k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features=false} serde_json = { version = "1.0.107", default-features = false, features = ["alloc"] } -docify = "0.2.4" +docify = "0.2.5" static_assertions = "1.1.0" aquamarine = { version = "0.3.2" } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index f0b4d0ce65bf..a4d0ec087622 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -27,7 +27,7 @@ sp-std = { path = "../../primitives/std", default-features = false} sp-storage = { path = "../../primitives/storage", default-features = false} sp-timestamp = { path = "../../primitives/timestamp", default-features = false} -docify = "0.2.4" +docify = "0.2.5" [dev-dependencies] sp-core = { path = "../../primitives/core" } From 43415ef58c143b985e09015cd000dbd65f6d3997 Mon Sep 17 00:00:00 2001 From: Juan Girini Date: Fri, 27 Oct 2023 11:38:16 +0200 Subject: [PATCH 07/69] feat: FRAME umbrella crate. (#1337) ### Original PR https://github.com/paritytech/substrate/pull/14137 This PR brings in the first version of the "_`frame` umbrella crate_". This crate is intended to serve two purposes: 1. documentation 2. easier development with frame. Ideally, we want most users to be able to build a frame-based pallet and runtime using just `frame` (plus `scale-codec` and `scale-info`). The crate is not finalized and is not yet intended for external use. Therefore, the version is set to `0.0.1-dev`, this PR is `silent`, and the entire crate is hidden behind the `experimental` flag. The main intention in merging it early on is to be able to iterate on it in the rest of [`developer-hub`](https://github.com/paritytech/polkadot-sdk-docs/) efforts. 
The public API of the `frame` crate is at the moment as follows: ``` pub mod frame pub use frame::log pub use frame::pallet pub mod frame::arithmetic pub use frame::arithmetic::<> pub use frame::arithmetic::<> pub mod frame::deps pub use frame::deps::codec pub use frame::deps::frame_executive pub use frame::deps::frame_support pub use frame::deps::frame_system pub use frame::deps::scale_info pub use frame::deps::sp_api pub use frame::deps::sp_arithmetic pub use frame::deps::sp_block_builder pub use frame::deps::sp_consensus_aura pub use frame::deps::sp_consensus_grandpa pub use frame::deps::sp_core pub use frame::deps::sp_inherents pub use frame::deps::sp_io pub use frame::deps::sp_offchain pub use frame::deps::sp_runtime pub use frame::deps::sp_std pub use frame::deps::sp_version pub mod frame::derive pub use frame::derive::CloneNoBound pub use frame::derive::Debug pub use frame::derive::Debug pub use frame::derive::DebugNoBound pub use frame::derive::Decode pub use frame::derive::Decode pub use frame::derive::DefaultNoBound pub use frame::derive::Encode pub use frame::derive::Encode pub use frame::derive::EqNoBound pub use frame::derive::PartialEqNoBound pub use frame::derive::RuntimeDebug pub use frame::derive::RuntimeDebugNoBound pub use frame::derive::TypeInfo pub use frame::derive::TypeInfo pub mod frame::prelude pub use frame::prelude::<> pub use frame::prelude::<> pub use frame::prelude::<> pub use frame::prelude::CloneNoBound pub use frame::prelude::Debug pub use frame::prelude::Debug pub use frame::prelude::DebugNoBound pub use frame::prelude::Decode pub use frame::prelude::Decode pub use frame::prelude::DefaultNoBound pub use frame::prelude::Encode pub use frame::prelude::Encode pub use frame::prelude::EqNoBound pub use frame::prelude::PartialEqNoBound pub use frame::prelude::RuntimeDebug pub use frame::prelude::RuntimeDebugNoBound pub use frame::prelude::TypeInfo pub use frame::prelude::TypeInfo pub use frame::prelude::frame_system pub mod frame::primitives pub use frame::primitives::BlakeTwo256 pub use frame::primitives::H160 pub use frame::primitives::H256 pub use frame::primitives::H512 pub use frame::primitives::Hash pub use frame::primitives::Keccak256 pub use frame::primitives::U256 pub use frame::primitives::U512 pub mod frame::runtime pub mod frame::runtime::apis pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::<> pub use frame::runtime::apis::ApplyExtrinsicResult pub use frame::runtime::apis::CheckInherentsResult pub use frame::runtime::apis::InherentData pub use frame::runtime::apis::OpaqueMetadata pub use frame::runtime::apis::impl_runtime_apis pub use frame::runtime::apis::sp_api pub mod frame::runtime::prelude pub use frame::runtime::prelude::<> pub use frame::runtime::prelude::ConstBool pub use frame::runtime::prelude::ConstI128 pub use frame::runtime::prelude::ConstI16 pub use frame::runtime::prelude::ConstI32 pub use frame::runtime::prelude::ConstI64 pub use frame::runtime::prelude::ConstI8 pub use frame::runtime::prelude::ConstU128 pub use frame::runtime::prelude::ConstU16 pub use frame::runtime::prelude::ConstU32 pub use frame::runtime::prelude::ConstU64 pub use frame::runtime::prelude::ConstU8 pub use frame::runtime::prelude::NativeVersion pub use frame::runtime::prelude::RuntimeVersion pub use frame::runtime::prelude::construct_runtime pub use 
frame::runtime::prelude::create_runtime_str pub use frame::runtime::prelude::derive_impl pub use frame::runtime::prelude::frame_support pub use frame::runtime::prelude::ord_parameter_types pub use frame::runtime::prelude::parameter_types pub use frame::runtime::prelude::runtime_version pub mod frame::runtime::testing_prelude pub use frame::runtime::testing_prelude::BuildStorage pub use frame::runtime::testing_prelude::Storage pub mod frame::runtime::types_common pub type frame::runtime::types_common::AccountId = <::Signer as sp_runtime::traits::IdentifyAccount>::AccountId pub type frame::runtime::types_common::BlockNumber = u32 pub type frame::runtime::types_common::BlockOf = sp_runtime::generic::block::Block, sp_runtime::generic::unchecked_extrinsic::UncheckedExtrinsic, ::RuntimeCall, frame::runtime::types_common::Signature, Extra>> pub type frame::runtime::types_common::OpaqueBlock = sp_runtime::generic::block::Block, sp_runtime::OpaqueExtrinsic> pub type frame::runtime::types_common::Signature = sp_runtime::MultiSignature pub type frame::runtime::types_common::SystemSignedExtensionsOf = (frame_system::extensions::check_non_zero_sender::CheckNonZeroSender, frame_system::extensions::check_spec_version::CheckSpecVersion, frame_system::extensions::check_tx_version::CheckTxVersion, frame_system::extensions::check_genesis::CheckGenesis, frame_system::extensions::check_mortality::CheckMortality, frame_system::extensions::check_nonce::CheckNonce, frame_system::extensions::check_weight::CheckWeight) pub mod frame::testing_prelude pub use frame::testing_prelude::<> pub use frame::testing_prelude::<> pub use frame::testing_prelude::BuildStorage pub use frame::testing_prelude::ConstBool pub use frame::testing_prelude::ConstI128 pub use frame::testing_prelude::ConstI16 pub use frame::testing_prelude::ConstI32 pub use frame::testing_prelude::ConstI64 pub use frame::testing_prelude::ConstI8 pub use frame::testing_prelude::ConstU128 pub use frame::testing_prelude::ConstU16 pub use frame::testing_prelude::ConstU32 pub use frame::testing_prelude::ConstU64 pub use frame::testing_prelude::ConstU8 pub use frame::testing_prelude::NativeVersion pub use frame::testing_prelude::RuntimeVersion pub use frame::testing_prelude::Storage pub use frame::testing_prelude::TestState pub use frame::testing_prelude::assert_err pub use frame::testing_prelude::assert_err_ignore_postinfo pub use frame::testing_prelude::assert_error_encoded_size pub use frame::testing_prelude::assert_noop pub use frame::testing_prelude::assert_ok pub use frame::testing_prelude::assert_storage_noop pub use frame::testing_prelude::construct_runtime pub use frame::testing_prelude::create_runtime_str pub use frame::testing_prelude::derive_impl pub use frame::testing_prelude::frame_support pub use frame::testing_prelude::frame_system pub use frame::testing_prelude::if_std pub use frame::testing_prelude::ord_parameter_types pub use frame::testing_prelude::parameter_types pub use frame::testing_prelude::runtime_version pub use frame::testing_prelude::storage_alias pub mod frame::traits pub use frame::traits::<> pub use frame::traits::<> ``` --- The road to full stabilization is - [ ] https://github.com/paritytech/polkadot-sdk/issues/127 - [ ] have a more intentional version bump, as opposed to the current bi weekly force-major-bump - [ ] revise the internal API of `frame`, especially what goes into the `prelude`s. 
- [ ] migrate all internal pallets and runtime to use `frame` --------- Co-authored-by: kianenigma Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Francisco Aguirre --- Cargo.lock | 128 ++++++- Cargo.toml | 4 +- docs/mermaid/substrate_client_runtime.mmd | 10 + docs/mermaid/substrate_simple.mmd | 8 + docs/mermaid/substrate_with_frame.mmd | 20 + substrate/Cargo.toml | 2 +- substrate/bin/minimal/node/Cargo.toml | 56 +++ substrate/bin/minimal/node/build.rs | 23 ++ substrate/bin/minimal/node/src/chain_spec.rs | 66 ++++ substrate/bin/minimal/node/src/cli.rs | 81 ++++ substrate/bin/minimal/node/src/command.rs | 126 +++++++ .../frame => bin/minimal/node}/src/lib.rs | 8 +- substrate/bin/minimal/node/src/main.rs | 30 ++ substrate/bin/minimal/node/src/rpc.rs | 66 ++++ substrate/bin/minimal/node/src/service.rs | 254 +++++++++++++ substrate/bin/minimal/runtime/Cargo.toml | 36 ++ substrate/bin/minimal/runtime/build.rs | 27 ++ substrate/bin/minimal/runtime/src/lib.rs | 231 ++++++++++++ .../client/consensus/manual-seal/src/lib.rs | 2 +- substrate/frame/Cargo.toml | 91 +++++ substrate/frame/examples/Cargo.toml | 8 +- .../frame/examples/frame-crate/Cargo.toml | 24 ++ .../frame/examples/frame-crate/src/lib.rs | 66 ++++ substrate/frame/examples/src/lib.rs | 3 + substrate/frame/src/lib.rs | 347 ++++++++++++++++++ substrate/frame/sudo/src/lib.rs | 24 +- substrate/frame/sudo/src/mock.rs | 4 +- substrate/frame/support/src/lib.rs | 4 + .../support/test/stg_frame_crate/Cargo.toml | 2 +- .../test/stg_frame_crate/frame/Cargo.toml | 20 - ...event_type_invalid_bound_no_frame_crate.rs | 45 +++ ...t_type_invalid_bound_no_frame_crate.stderr | 5 + substrate/frame/system/Cargo.toml | 1 + substrate/frame/system/src/lib.rs | 54 +++ substrate/frame/timestamp/src/lib.rs | 27 +- .../frame/transaction-payment/src/lib.rs | 47 ++- .../primitives/api/proc-macro/src/utils.rs | 28 +- substrate/primitives/core/src/crypto.rs | 3 +- substrate/primitives/keyring/src/ed25519.rs | 12 + substrate/primitives/keyring/src/lib.rs | 5 +- substrate/primitives/keyring/src/sr25519.rs | 12 + .../src/generic/unchecked_extrinsic.rs | 8 +- substrate/primitives/runtime/src/lib.rs | 2 +- substrate/primitives/runtime/src/traits.rs | 4 +- substrate/primitives/session/src/lib.rs | 22 +- .../primitives/session/src/runtime_api.rs | 38 ++ substrate/primitives/weights/src/lib.rs | 18 + substrate/src/lib.rs | 109 ++---- 48 files changed, 2019 insertions(+), 192 deletions(-) create mode 100644 docs/mermaid/substrate_client_runtime.mmd create mode 100644 docs/mermaid/substrate_simple.mmd create mode 100644 docs/mermaid/substrate_with_frame.mmd create mode 100644 substrate/bin/minimal/node/Cargo.toml create mode 100644 substrate/bin/minimal/node/build.rs create mode 100644 substrate/bin/minimal/node/src/chain_spec.rs create mode 100644 substrate/bin/minimal/node/src/cli.rs create mode 100644 substrate/bin/minimal/node/src/command.rs rename substrate/{frame/support/test/stg_frame_crate/frame => bin/minimal/node}/src/lib.rs (90%) create mode 100644 substrate/bin/minimal/node/src/main.rs create mode 100644 substrate/bin/minimal/node/src/rpc.rs create mode 100644 substrate/bin/minimal/node/src/service.rs create mode 100644 substrate/bin/minimal/runtime/Cargo.toml create mode 100644 substrate/bin/minimal/runtime/build.rs create mode 100644 substrate/bin/minimal/runtime/src/lib.rs create mode 100644 substrate/frame/Cargo.toml create mode 100644 substrate/frame/examples/frame-crate/Cargo.toml 
create mode 100644 substrate/frame/examples/frame-crate/src/lib.rs create mode 100644 substrate/frame/src/lib.rs delete mode 100644 substrate/frame/support/test/stg_frame_crate/frame/Cargo.toml create mode 100644 substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr create mode 100644 substrate/primitives/session/src/runtime_api.rs diff --git a/Cargo.lock b/Cargo.lock index 0facc7e18cf2..6c65b1392535 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -575,7 +575,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -591,7 +591,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -2434,14 +2434,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "f56b4c72906975ca04becb8a30e102dfecddd0c06181e3e95ddc444be28881f8" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", + "time 0.1.45", "wasm-bindgen", "windows-targets 0.48.5", ] @@ -5192,10 +5193,32 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame" -version = "0.1.0" +version = "0.0.1-dev" dependencies = [ + "docify", + "frame-executive", "frame-support", "frame-system", + "frame-system-rpc-runtime-api", + "log", + "pallet-examples", + "parity-scale-codec", + "scale-info", + "simple-mermaid", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", ] [[package]] @@ -7933,6 +7956,55 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "minimal-node" +version = "4.0.0-dev" +dependencies = [ + "clap 4.4.6", + "frame", + "futures", + "futures-timer", + "jsonrpsee", + "minimal-runtime", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-manual-seal", + "sc-executor", + "sc-network", + "sc-offchain", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "minimal-runtime" +version = "0.1.0" +dependencies = [ + "frame", + "pallet-balances", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "substrate-wasm-builder", +] + [[package]] name = "miniz_oxide" version = "0.7.1" @@ -9695,6 +9767,15 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-example-frame-crate" +version = "0.0.1" +dependencies = [ + "frame", + "parity-scale-codec", + "scale-info", +] + [[package]] name = "pallet-example-kitchensink" version = "4.0.0-dev" @@ -9751,6 +9832,7 @@ dependencies = [ "pallet-default-config-example", "pallet-dev-mode", "pallet-example-basic", + "pallet-example-frame-crate", "pallet-example-kitchensink", 
"pallet-example-offchain-worker", "pallet-example-split", @@ -13796,7 +13878,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring 0.16.20", - "time", + "time 0.3.27", "x509-parser 0.13.2", "yasna", ] @@ -13809,7 +13891,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring 0.16.20", - "time", + "time 0.3.27", "yasna", ] @@ -16497,6 +16579,11 @@ dependencies = [ "wide", ] +[[package]] +name = "simple-mermaid" +version = "0.1.0" +source = "git+https://github.com/kianenigma/simple-mermaid.git?rev=e48b187bcfd5cc75111acd9d241f1bd36604344b#e48b187bcfd5cc75111acd9d241f1bd36604344b" + [[package]] name = "siphasher" version = "0.3.11" @@ -17843,7 +17930,6 @@ dependencies = [ name = "substrate" version = "1.0.0" dependencies = [ - "aquamarine", "chain-spec-builder", "frame-support", "sc-cli", @@ -17854,6 +17940,7 @@ dependencies = [ "sc-consensus-manual-seal", "sc-consensus-pow", "sc-service", + "simple-mermaid", "sp-runtime", "subkey", ] @@ -18570,6 +18657,17 @@ dependencies = [ "tikv-jemalloc-sys", ] +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + [[package]] name = "time" version = "0.3.27" @@ -19422,6 +19520,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -19924,7 +20028,7 @@ dependencies = [ "sha2 0.10.7", "stun", "thiserror", - "time", + "time 0.3.27", "tokio", "turn", "url", @@ -20539,7 +20643,7 @@ dependencies = [ "ring 0.16.20", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -20557,7 +20661,7 @@ dependencies = [ "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time", + "time 0.3.27", ] [[package]] @@ -20727,7 +20831,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time", + "time 0.3.27", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c98fe6d1a3ac..66271139dfd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,6 +179,8 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", + "substrate/bin/minimal/node", + "substrate/bin/minimal/runtime", "substrate", "substrate/bin/node-template/node", "substrate/bin/node-template/pallets/template", @@ -251,6 +253,7 @@ members = [ "substrate/client/transaction-pool", "substrate/client/transaction-pool/api", "substrate/client/utils", + "substrate/frame", "substrate/frame/alliance", "substrate/frame/asset-conversion", "substrate/frame/asset-rate", @@ -350,7 +353,6 @@ members = [ "substrate/frame/support/test", "substrate/frame/support/test/compile_pass", "substrate/frame/support/test/pallet", - "substrate/frame/support/test/stg_frame_crate/frame", "substrate/frame/support/test/stg_frame_crate", "substrate/frame/system", 
"substrate/frame/system/benchmarking", diff --git a/docs/mermaid/substrate_client_runtime.mmd b/docs/mermaid/substrate_client_runtime.mmd new file mode 100644 index 000000000000..23c3f849224a --- /dev/null +++ b/docs/mermaid/substrate_client_runtime.mmd @@ -0,0 +1,10 @@ +graph TB +subgraph Substrate + direction LR + subgraph Client + end + subgraph Runtime + end + Client --runtime-api--> Runtime + Runtime --host-functions--> Client +end diff --git a/docs/mermaid/substrate_simple.mmd b/docs/mermaid/substrate_simple.mmd new file mode 100644 index 000000000000..475d8be5ef81 --- /dev/null +++ b/docs/mermaid/substrate_simple.mmd @@ -0,0 +1,8 @@ +graph TB +subgraph Substrate + direction LR + subgraph Client + end + subgraph Runtime + end +end diff --git a/docs/mermaid/substrate_with_frame.mmd b/docs/mermaid/substrate_with_frame.mmd new file mode 100644 index 000000000000..12d072a3360c --- /dev/null +++ b/docs/mermaid/substrate_with_frame.mmd @@ -0,0 +1,20 @@ +graph TB +subgraph Substrate + direction LR + subgraph Client + Database + Networking + Consensus + end + subgraph Runtime + subgraph FRAME + direction LR + Governance + Currency + Staking + Identity + end + end + Client --runtime-api--> Runtime + Runtime --host-functions--> Client +end diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml index d77f02c60603..9e2e0b1a6eec 100644 --- a/substrate/Cargo.toml +++ b/substrate/Cargo.toml @@ -10,7 +10,7 @@ version = "1.0.0" # The dependencies are only needed for docs. [dependencies] -aquamarine = "0.3.2" +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } subkey = { path = "bin/utils/subkey" } chain-spec-builder = { path = "bin/utils/chain-spec-builder" } diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml new file mode 100644 index 000000000000..11ce98eec0dd --- /dev/null +++ b/substrate/bin/minimal/node/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "minimal-node" +version = "4.0.0-dev" +description = "A fresh FRAME-based Substrate node, ready for hacking." 
+authors = ["Substrate DevHub "] +homepage = "https://substrate.io/" +edition = "2021" +license = "MIT-0" +publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +build = "build.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[[bin]] +name = "minimal-node" + +[dependencies] +clap = { version = "4.0.9", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +futures-timer = "3.0.1" +jsonrpsee = { version = "0.16.2", features = ["server"] } + +sc-cli = { path = "../../../client/cli" } +sc-executor = { path = "../../../client/executor" } +sc-network = { path = "../../../client/network" } +sc-service = { path = "../../../client/service" } +sc-telemetry = { path = "../../../client/telemetry" } +sc-transaction-pool = { path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } +sc-consensus = { path = "../../../client/consensus/common" } +sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" } +sc-rpc-api = { path = "../../../client/rpc-api" } +sc-basic-authorship = { path = "../../../client/basic-authorship" } +sc-offchain = { path = "../../../client/offchain" } +sc-client-api = { path = "../../../client/api" } + +sp-timestamp = { path = "../../../primitives/timestamp" } +sp-keyring = { path = "../../../primitives/keyring" } +sp-api = { path = "../../../primitives/api" } +sp-blockchain = { path = "../../../primitives/blockchain" } +sp-block-builder = { path = "../../../primitives/block-builder" } +sp-io = { path = "../../../primitives/io" } +sp-runtime = { path = "../../../primitives/runtime" } + +substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } + +frame = { path = "../../../frame", features = ["runtime", "experimental"] } +runtime = { package = "minimal-runtime", path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } + +[features] +default = [] diff --git a/substrate/bin/minimal/node/build.rs b/substrate/bin/minimal/node/build.rs new file mode 100644 index 000000000000..fa7686e01099 --- /dev/null +++ b/substrate/bin/minimal/node/build.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + rerun_if_git_head_changed(); +} diff --git a/substrate/bin/minimal/node/src/chain_spec.rs b/substrate/bin/minimal/node/src/chain_spec.rs new file mode 100644 index 000000000000..91fa9ef45202 --- /dev/null +++ b/substrate/bin/minimal/node/src/chain_spec.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use runtime::{BalancesConfig, RuntimeGenesisConfig, SudoConfig, SystemConfig, WASM_BINARY}; +use sc_service::{ChainType, Properties}; +use sp_keyring::AccountKeyring; + +/// This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::GenericChainSpec; + +fn props() -> Properties { + let mut properties = Properties::new(); + properties.insert("tokenDecimals".to_string(), 0.into()); + properties.insert("tokenSymbol".to_string(), "MINI".into()); + properties +} + +pub fn development_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; + Ok(ChainSpec::from_genesis( + "Development", + "dev", + ChainType::Development, + move || testnet_genesis(wasm_binary), + vec![], + None, + None, + None, + Some(props()), + None, + )) +} + +/// Configure initial storage state for FRAME pallets. +fn testnet_genesis(wasm_binary: &[u8]) -> RuntimeGenesisConfig { + use frame::traits::Get; + use runtime::interface::{Balance, MinimumBalance}; + let endowment = >::get().max(1) * 1000; + let balances = AccountKeyring::iter() + .map(|a| (a.to_account_id(), endowment)) + .collect::>(); + RuntimeGenesisConfig { + system: SystemConfig { + // Add Wasm runtime to storage. + code: wasm_binary.to_vec(), + _config: Default::default(), + }, + balances: BalancesConfig { balances }, + sudo: SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + ..Default::default() + } +} diff --git a/substrate/bin/minimal/node/src/cli.rs b/substrate/bin/minimal/node/src/cli.rs new file mode 100644 index 000000000000..e464fa7d6caa --- /dev/null +++ b/substrate/bin/minimal/node/src/cli.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sc_cli::RunCmd; + +#[derive(Debug, Clone)] +pub enum Consensus { + ManualSeal(u64), + InstantSeal, +} + +impl std::str::FromStr for Consensus { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(if s == "instant-seal" { + Consensus::InstantSeal + } else if let Some(block_time) = s.strip_prefix("manual-seal-") { + Consensus::ManualSeal(block_time.parse().map_err(|_| "invalid block time")?) 
+ } else { + return Err("incorrect consensus identifier".into()) + }) + } +} + +#[derive(Debug, clap::Parser)] +pub struct Cli { + #[command(subcommand)] + pub subcommand: Option, + + #[clap(long, default_value = "manual-seal-3000")] + pub consensus: Consensus, + + #[clap(flatten)] + pub run: RunCmd, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + /// Key management cli utilities + #[command(subcommand)] + Key(sc_cli::KeySubcommand), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Db meta columns information. + ChainInfo(sc_cli::ChainInfoCmd), +} diff --git a/substrate/bin/minimal/node/src/command.rs b/substrate/bin/minimal/node/src/command.rs new file mode 100644 index 000000000000..a985370c2d87 --- /dev/null +++ b/substrate/bin/minimal/node/src/command.rs @@ -0,0 +1,126 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + chain_spec, + cli::{Cli, Subcommand}, + service, +}; +use sc_cli::SubstrateCli; +use sc_service::PartialComponents; + +#[cfg(feature = "try-runtime")] +use try_runtime_cli::block_building_info::timestamp_with_aura_info; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Substrate Node".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "support.anonymous.an".into() + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()?), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + + match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. 
} = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, backend, None), task_manager)) + }) + }, + Some(Subcommand::ChainInfo(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(&config)) + }, + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node_until_exit(|config| async move { + service::new_full(config, cli.consensus).map_err(sc_cli::Error::Service) + }) + }, + } +} diff --git a/substrate/frame/support/test/stg_frame_crate/frame/src/lib.rs b/substrate/bin/minimal/node/src/lib.rs similarity index 90% rename from substrate/frame/support/test/stg_frame_crate/frame/src/lib.rs rename to substrate/bin/minimal/node/src/lib.rs index dba99f1bbe73..c2065def736a 100644 --- a/substrate/frame/support/test/stg_frame_crate/frame/src/lib.rs +++ b/substrate/bin/minimal/node/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub mod deps { - pub use frame_support; - pub use frame_system; -} +pub mod chain_spec; +pub(crate) mod cli; +pub mod rpc; +pub mod service; diff --git a/substrate/bin/minimal/node/src/main.rs b/substrate/bin/minimal/node/src/main.rs new file mode 100644 index 000000000000..900651fd1fdb --- /dev/null +++ b/substrate/bin/minimal/node/src/main.rs @@ -0,0 +1,30 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Substrate Node Template CLI library. 
+#![warn(missing_docs)]
+
+mod chain_spec;
+#[macro_use]
+mod service;
+mod cli;
+mod command;
+mod rpc;
+
+fn main() -> sc_cli::Result<()> {
+	command::run()
+}
diff --git a/substrate/bin/minimal/node/src/rpc.rs b/substrate/bin/minimal/node/src/rpc.rs
new file mode 100644
index 000000000000..d0c417a93d7a
--- /dev/null
+++ b/substrate/bin/minimal/node/src/rpc.rs
@@ -0,0 +1,66 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A collection of node-specific RPC methods.
+//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer
+//! used by Substrate nodes. This file extends those RPC definitions with
+//! capabilities that are specific to this project's runtime configuration.
+
+#![warn(missing_docs)]
+
+use jsonrpsee::RpcModule;
+use runtime::interface::{AccountId, Nonce, OpaqueBlock};
+use sc_transaction_pool_api::TransactionPool;
+use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
+use std::sync::Arc;
+use substrate_frame_rpc_system::{System, SystemApiServer};
+
+pub use sc_rpc_api::DenyUnsafe;
+
+/// Full client dependencies.
+pub struct FullDeps<C, P> {
+	/// The client instance to use.
+	pub client: Arc<C>,
+	/// Transaction pool instance.
+	pub pool: Arc<P>,
+	/// Whether to deny unsafe calls
+	pub deny_unsafe: DenyUnsafe,
+}
+
+/// Instantiate all full RPC extensions.
+pub fn create_full<C, P>(
+	deps: FullDeps<C, P>,
+) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
+where
+	C: Send
+		+ Sync
+		+ 'static
+		+ sp_api::ProvideRuntimeApi<OpaqueBlock>
+		+ HeaderBackend<OpaqueBlock>
+		+ HeaderMetadata<OpaqueBlock, Error = BlockChainError>
+		+ 'static,
+	C::Api: sp_block_builder::BlockBuilder<OpaqueBlock>,
+	C::Api: substrate_frame_rpc_system::AccountNonceApi<OpaqueBlock, AccountId, Nonce>,
+	P: TransactionPool + 'static,
+{
+	let mut module = RpcModule::new(());
+	let FullDeps { client, pool, deny_unsafe } = deps;
+
+	module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
+
+	Ok(module)
+}
diff --git a/substrate/bin/minimal/node/src/service.rs b/substrate/bin/minimal/node/src/service.rs
new file mode 100644
index 000000000000..b6369c44dda9
--- /dev/null
+++ b/substrate/bin/minimal/node/src/service.rs
@@ -0,0 +1,254 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use futures::FutureExt;
+use runtime::{self, interface::OpaqueBlock as Block, RuntimeApi};
+use sc_client_api::backend::Backend;
+use sc_executor::WasmExecutor;
+use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_telemetry::{Telemetry, TelemetryWorker};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+use std::sync::Arc;
+
+use crate::cli::Consensus;
+
+#[cfg(feature = "runtime-benchmarks")]
+type HostFunctions =
+	(sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);
+
+#[cfg(not(feature = "runtime-benchmarks"))]
+type HostFunctions = sp_io::SubstrateHostFunctions;
+
+pub(crate) type FullClient =
+	sc_service::TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
+type FullBackend = sc_service::TFullBackend<Block>;
+type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
+
+pub fn new_partial(
+	config: &Configuration,
+) -> Result<
+	sc_service::PartialComponents<
+		FullClient,
+		FullBackend,
+		FullSelectChain,
+		sc_consensus::DefaultImportQueue<Block>,
+		sc_transaction_pool::FullPool<Block, FullClient>,
+		Option<Telemetry>,
+	>,
+	ServiceError,
+> {
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
+		.filter(|x| !x.is_empty())
+		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+			let worker = TelemetryWorker::new(16)?;
+			let telemetry = worker.handle().new_telemetry(endpoints);
+			Ok((worker, telemetry))
+		})
+		.transpose()?;
+
+	let executor = sc_service::new_wasm_executor(&config);
+
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, _>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
+	let client = Arc::new(client);
+
+	let telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", None, worker.run());
+		telemetry
+	});
+
+	let select_chain = sc_consensus::LongestChain::new(backend.clone());
+
+	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+		config.transaction_pool.clone(),
+		config.role.is_authority().into(),
+
config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let import_queue = sc_consensus_manual_seal::import_queue( + Box::new(client.clone()), + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ); + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (telemetry), + }) +} + +/// Builds a new service for a full client. +pub fn new_full(config: Configuration, consensus: Consensus) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: mut telemetry, + } = new_partial(&config)?; + + let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); + + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + net_config, + block_announce_validator_builder: None, + warp_sync_params: None, + block_relay: None, + })?; + + if config.offchain_worker.enabled { + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + is_validator: config.role.is_authority(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + enable_http_requests: true, + custom_extensions: |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + + let rpc_extensions_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + let prometheus_registry = config.prometheus_registry().cloned(); + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network, + client: client.clone(), + keystore: keystore_container.keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + tx_handler_controller, + sync_service, + config, + telemetry: telemetry.as_mut(), + })?; + + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + match consensus { + Consensus::InstantSeal => { + let params = sc_consensus_manual_seal::InstantSealParams { + block_import: client.clone(), + env: proposer, + client, + pool: transaction_pool, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: move |_, ()| async move { + Ok(sp_timestamp::InherentDataProvider::from_system_time()) + }, + }; + + let authorship_future = sc_consensus_manual_seal::run_instant_seal(params); + + task_manager.spawn_essential_handle().spawn_blocking( + "instant-seal", + None, + authorship_future, + ); + }, + Consensus::ManualSeal(block_time) => { + let (mut sink, 
commands_stream) = futures::channel::mpsc::channel(1024); + task_manager.spawn_handle().spawn("block_authoring", None, async move { + loop { + futures_timer::Delay::new(std::time::Duration::from_millis(block_time)).await; + sink.try_send(sc_consensus_manual_seal::EngineCommand::SealNewBlock { + create_empty: true, + finalize: true, + parent_hash: None, + sender: None, + }) + .unwrap(); + } + }); + + let params = sc_consensus_manual_seal::ManualSealParams { + block_import: client.clone(), + env: proposer, + client, + pool: transaction_pool, + select_chain, + commands_stream: Box::pin(commands_stream), + consensus_data_provider: None, + create_inherent_data_providers: move |_, ()| async move { + Ok(sp_timestamp::InherentDataProvider::from_system_time()) + }, + }; + let authorship_future = sc_consensus_manual_seal::run_manual_seal(params); + + task_manager.spawn_essential_handle().spawn_blocking( + "manual-seal", + None, + authorship_future, + ); + }, + } + + network_starter.start_network(); + Ok(task_manager) +} diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml new file mode 100644 index 000000000000..1f9b49da7bc3 --- /dev/null +++ b/substrate/bin/minimal/runtime/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "minimal-runtime" +version = "0.1.0" +edition = "2021" + +[dependencies] +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. +frame = { path = "../../../frame", default-features = false, features = ["runtime", "experimental"] } + +# pallets that we want to use +pallet-balances = { path = "../../../frame/balances", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } + + +[build-dependencies] +substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } + +[features] +default = [ "std" ] +std = [ + "frame/std", + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "parity-scale-codec/std", + "scale-info/std", + "substrate-wasm-builder", +] diff --git a/substrate/bin/minimal/runtime/build.rs b/substrate/bin/minimal/runtime/build.rs new file mode 100644 index 000000000000..b7676a70dfe8 --- /dev/null +++ b/substrate/bin/minimal/runtime/build.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +fn main() { + #[cfg(feature = "std")] + { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build(); + } +} diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/substrate/bin/minimal/runtime/src/lib.rs new file mode 100644 index 000000000000..6c59592554c5 --- /dev/null +++ b/substrate/bin/minimal/runtime/src/lib.rs @@ -0,0 +1,231 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use frame::{ + deps::frame_support::weights::{FixedFee, NoFee}, + prelude::*, + runtime::{ + apis::{ + self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, OpaqueMetadata, + }, + prelude::*, + }, +}; + +#[runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("minimal-runtime"), + impl_name: create_runtime_str!("minimal-runtime"), + authoring_version: 1, + spec_version: 0, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +construct_runtime!( + pub struct Runtime { + System: frame_system, + Timestamp: pallet_timestamp, + + Balances: pallet_balances, + Sudo: pallet_sudo, + TransactionPayment: pallet_transaction_payment, + } +); + +parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; +} + +#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type Version = Version; + type BlockHashCount = ConstU32<1024>; + type AccountData = pallet_balances::AccountData<::Balance>; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type AccountStore = System; +} + +#[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig as pallet_sudo::DefaultConfig)] +impl pallet_sudo::Config for Runtime {} + +#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig as pallet_timestamp::DefaultConfig)] +impl pallet_timestamp::Config for Runtime {} + +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type WeightToFee = NoFee<::Balance>; + type LengthToFee = FixedFee<1, ::Balance>; +} + +type Block = frame::runtime::types_common::BlockOf; +type Header = HeaderFor; + +type RuntimeExecutive = + Executive, Runtime, AllPalletsWithSystem>; + +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; + +impl_runtime_apis! { + impl apis::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + RuntimeExecutive::execute_block(block) + } + + fn initialize_block(header: &Header) { + RuntimeExecutive::initialize_block(header) + } + } + impl apis::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> Vec { + Runtime::metadata_versions() + } + } + + impl apis::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ExtrinsicFor) -> ApplyExtrinsicResult { + RuntimeExecutive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> HeaderFor { + RuntimeExecutive::finalize_block() + } + + fn inherent_extrinsics(data: InherentData) -> Vec> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: InherentData, + ) -> CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl apis::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ExtrinsicFor, + block_hash: ::Hash, + ) -> TransactionValidity { + RuntimeExecutive::validate_transaction(source, tx, block_hash) + } + } + + impl apis::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &HeaderFor) { + RuntimeExecutive::offchain_worker(header) + } + } + + impl apis::SessionKeys for Runtime { + fn generate_session_keys(_seed: Option>) -> Vec { + Default::default() + } + + fn decode_session_keys( + _encoded: Vec, + ) -> Option, apis::KeyTypeId)>> { + Default::default() + } + } + + impl apis::AccountNonceApi for Runtime { + fn account_nonce(account: interface::AccountId) -> interface::Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + interface::Balance, + > for Runtime { + fn query_info(uxt: ExtrinsicFor, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ExtrinsicFor, len: u32) -> FeeDetails { + 
TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> interface::Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> interface::Balance { + TransactionPayment::length_to_fee(length) + } + } +} + +/// Some re-exports that the node side code needs to know. Some are useful in this context as well. +/// +/// Other types should preferably be private. +// TODO: this should be standardized in some way, see: +// https://github.com/paritytech/substrate/issues/10579#issuecomment-1600537558 +pub mod interface { + use super::Runtime; + use frame::deps::frame_system; + + pub type Block = super::Block; + pub use frame::runtime::types_common::OpaqueBlock; + pub type AccountId = ::AccountId; + pub type Nonce = ::Nonce; + pub type Hash = ::Hash; + pub type Balance = ::Balance; + pub type MinimumBalance = ::ExistentialDeposit; +} diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 41cd5f3127e8..e3608f6716c2 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -236,7 +236,7 @@ pub async fn run_instant_seal( // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { - create_empty: false, + create_empty: true, finalize: false, parent_hash: None, sender: None, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml new file mode 100644 index 000000000000..f7b35aba3583 --- /dev/null +++ b/substrate/frame/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "frame" +version = "0.0.1-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "paritytech.github.io" +repository.workspace = true +description = "The single package to get you started with building frame pallets and runtimes" + +[package.metadata.docs.rs] +# enable `experimental` feature for docs +features = ["experimental"] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# external deps +parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } + +# primitive deps, used for developing FRAME pallets. +sp-runtime = { default-features = false, path = "../primitives/runtime" } +sp-std = { default-features = false, path = "../primitives/std" } +sp-io = { default-features = false, path = "../primitives/io" } +sp-core = { default-features = false, path = "../primitives/core" } +sp-arithmetic = { default-features = false, path = "../primitives/arithmetic" } + +# frame deps, for developing FRAME pallets. +frame-support = { default-features = false, path = "support" } +frame-system = { default-features = false, path = "system" } + +# primitive types used for developing FRAME runtimes. 
+sp-version = { default-features = false, path = "../primitives/version", optional = true } +sp-api = { default-features = false, path = "../primitives/api", optional = true } +sp-block-builder = { default-features = false, path = "../primitives/block-builder", optional = true } +sp-transaction-pool = { default-features = false, path = "../primitives/transaction-pool", optional = true } +sp-offchain = { default-features = false, path = "../primitives/offchain", optional = true } +sp-session = { default-features = false, path = "../primitives/session", optional = true } +sp-consensus-aura = { default-features = false, path = "../primitives/consensus/aura", optional = true } +sp-consensus-grandpa = { default-features = false, path = "../primitives/consensus/grandpa", optional = true } +sp-inherents = { default-features = false, path = "../primitives/inherents", optional = true } + +frame-executive = { default-features = false, path = "../frame/executive", optional = true } +frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } + +docify = "0.2.0" +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } +log = { version = "0.4.20", default-features = false } + +[dev-dependencies] +pallet-examples = { path = "./examples" } + +[features] +default = [ "runtime", "std" ] +experimental = [ "frame-support/experimental", "frame-system/experimental" ] +runtime = [ + "frame-executive", + "frame-system-rpc-runtime-api", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-inherents", + "sp-offchain", + "sp-session", + "sp-transaction-pool", + "sp-version", +] +std = [ + "frame-executive?/std", + "frame-support/std", + "frame-system-rpc-runtime-api?/std", + "frame-system/std", + "log/std", + "parity-scale-codec/std", + "scale-info/std", + "sp-api?/std", + "sp-arithmetic/std", + "sp-block-builder?/std", + "sp-consensus-aura?/std", + "sp-consensus-grandpa?/std", + "sp-core/std", + "sp-inherents?/std", + "sp-io/std", + "sp-offchain?/std", + "sp-runtime/std", + "sp-session?/std", + "sp-std/std", + "sp-transaction-pool?/std", + "sp-version?/std", +] diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 9c47d7442111..98c4e51889bb 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -12,11 +12,12 @@ description = "The single package with examples of various types of FRAME pallet targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-example-basic = { path = "basic", default-features = false} pallet-default-config-example = { path = "default-config", default-features = false} -pallet-example-offchain-worker = { path = "offchain-worker", default-features = false} -pallet-example-kitchensink = { path = "kitchensink", default-features = false} pallet-dev-mode = { path = "dev-mode", default-features = false} +pallet-example-basic = { path = "basic", default-features = false} +pallet-example-frame-crate = { path = "frame-crate", default-features = false } +pallet-example-kitchensink = { path = "kitchensink", default-features = false} +pallet-example-offchain-worker = { path = "offchain-worker", default-features = false} pallet-example-split = { path = "split", default-features = false} [features] @@ -25,6 +26,7 @@ std = [ "pallet-default-config-example/std", "pallet-dev-mode/std", "pallet-example-basic/std", + "pallet-example-frame-crate/std", 
"pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", "pallet-example-split/std", diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml new file mode 100644 index 000000000000..d525008e5255 --- /dev/null +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "pallet-example-frame-crate" +version = "0.0.1" +authors = ["Parity Technologies "] +edition = "2021" +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME example pallet with umbrella crate" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +frame = { path = "../..", default-features = false, features = ["runtime", "experimental"] } + + +[features] +default = [ "std" ] +std = [ "codec/std", "frame/std", "scale-info/std" ] diff --git a/substrate/frame/examples/frame-crate/src/lib.rs b/substrate/frame/examples/frame-crate/src/lib.rs new file mode 100644 index 000000000000..0fea2c22cf54 --- /dev/null +++ b/substrate/frame/examples/frame-crate/src/lib.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame::prelude::*; + +#[frame::pallet(dev_mode)] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: IsType<::RuntimeEvent> + From>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + pub enum Event {} + + #[pallet::storage] + pub type Value = StorageValue; + + #[pallet::call] + impl Pallet { + pub fn some_dispatchable(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use crate::pallet as my_pallet; + use frame::testing_prelude::*; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + MyPallet: my_pallet, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl my_pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + } +} diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index a7084fc6ef9b..8d65639f8352 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -40,4 +40,7 @@ //! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to //! split sections across multiple files. //! +//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be +//! built using only the `frame` umbrella crate. +//! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. 
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
new file mode 100644
index 000000000000..1a8350405a89
--- /dev/null
+++ b/substrate/frame/src/lib.rs
@@ -0,0 +1,347 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! > Made for [![polkadot]](https://polkadot.network)
+//!
+//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
+//!
+//! # FRAME
+//!
+//! ```no_compile
+//! ______ ______ ________ ___ __ __ ______
+//! /_____/\ /_____/\ /_______/\ /__//_//_/\ /_____/\
+//! \::::_\/_\:::_ \ \ \::: _ \ \\::\| \| \ \\::::_\/_
+//! \:\/___/\\:(_) ) )_\::(_) \ \\:. \ \\:\/___/\
+//! \:::._\/ \: __ `\ \\:: __ \ \\:.\-/\ \ \\::___\/_
+//! \:\ \ \ \ `\ \ \\:.\ \ \ \\. \ \ \ \\:\____/\
+//! \_\/ \_\/ \_\/ \__\/\__\/ \__\/ \__\/ \_____\/
+//! ```
+//!
+//! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's
+//! > State Transition Function (Runtime) Framework.
+//!
+//! ## Warning: Experimental
+//!
+//! This crate and all of its content is experimental, and should not yet be used in production.
+//!
+//! ## Getting Started
+//!
+//! TODO: link to `developer_hub::polkadot_sdk::frame`. The `developer_hub` hasn't been published
+//! yet; this can be updated once it is linkable.

+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg(feature = "experimental")]
+
+/// Exports the main pallet macro. This can wrap a `mod pallet` and will transform it into
+/// a pallet, e.g. `#[frame::pallet] mod pallet { .. }`.
+///
+/// Note that this is not part of the prelude, in order to make it such that the common way to
+/// define a pallet is `#[frame::pallet] mod pallet { .. }`, followed by `#[pallet::foo]`,
+/// `#[pallet::bar]` inside the mod.
+pub use frame_support::pallet;
+
+/// The logging library of the runtime. Can normally be the classic `log` crate.
+pub use log;
+
+/// The main prelude of FRAME.
+///
+/// This prelude should almost always be the first line of code in any pallet or runtime.
+///
+/// ```
+/// use frame::prelude::*;
+///
+/// // rest of your pallet..
+/// mod pallet {}
+/// ```
+pub mod prelude {
+	/// `frame_system`'s parent crate, which is mandatory in all pallets built with this crate.
+	///
+	/// Conveniently, the keyword `frame_system` is in scope once one uses
+	/// `use frame::prelude::*`.
+	#[doc(inline)]
+	pub use frame_system;
+
+	/// Pallet prelude of `frame-support`.
+	///
+	/// Note: this needs to be revised once `frame-support` evolves.
+	// `frame-support` will be broken down (https://github.com/paritytech/polkadot-sdk/issues/127) and its re-exports will
+	// most likely change. These wildcard re-exports can be optimized once `frame-support` has
+	// changed.
+	#[doc(no_inline)]
+	pub use frame_support::pallet_prelude::*;
+
+	/// Pallet prelude of `frame-system`.
+	#[doc(no_inline)]
+	pub use frame_system::pallet_prelude::*;
+
+	/// All of the std alternative types.
+	#[doc(no_inline)]
+	pub use sp_std::prelude::*;
+
+	/// All FRAME-relevant derive macros.
+	#[doc(no_inline)]
+	pub use super::derive::*;
+}
+
+/// The main testing prelude of FRAME.
+///
+/// A test setup typically starts with:
+///
+/// ```
+/// use frame::testing_prelude::*;
+/// // rest of your test setup.
+/// ```
+#[cfg(feature = "std")]
+pub mod testing_prelude {
+	pub use super::prelude::*;
+	/// Testing includes building a runtime, so we bring in all preludes related to runtimes as
+	/// well.
+	pub use super::runtime::testing_prelude::*;
+
+	/// Other helper macros from `frame_support` that help with asserting in tests.
+	pub use frame_support::{
+		assert_err, assert_err_ignore_postinfo, assert_error_encoded_size, assert_noop, assert_ok,
+		assert_storage_noop, storage_alias,
+	};
+
+	pub use frame_system::{self, mocking::*};
+	pub use sp_io::TestExternalities as TestState;
+	pub use sp_std::if_std;
+}
+
+/// All of the types and tools needed to build FRAME-based runtimes.
+#[cfg(any(feature = "runtime", feature = "std"))]
+pub mod runtime {
+	/// The main prelude of `FRAME` for building runtimes.
+	///
+	/// A runtime typically starts with:
+	///
+	/// ```
+	/// use frame::{prelude::*, runtime::prelude::*};
+	/// ```
+	pub mod prelude {
+		/// All of the types related to the FRAME runtime executive.
+		pub use frame_executive::*;
+
+		/// Macro to amalgamate the runtime into `struct Runtime`.
+		pub use frame_support::construct_runtime;
+
+		/// Macro to easily derive the `Config` trait of various pallets for `Runtime`.
+		pub use frame_support::derive_impl;
+
+		/// Macros to easily impl traits such as `Get` for types.
+		// TODO: linking to `Get` in the line above triggers an ICE :/
+		pub use frame_support::{ord_parameter_types, parameter_types};
+
+		/// Const types that can easily be used in conjunction with `Get`.
+		pub use frame_support::traits::{
+			ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16,
+			ConstU32, ConstU64, ConstU8,
+		};
+
+		/// Types to define your runtime version.
+		pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion};
+
+		#[cfg(feature = "std")]
+		pub use sp_version::NativeVersion;
+	}
+
+	/// Types and traits for runtimes that implement runtime APIs.
+	///
+	/// A testing runtime should not need this.
+	///
+	/// A non-testing runtime should have this enabled, as such:
+	///
+	/// ```
+	/// use frame::runtime::{prelude::*, apis::{*,}};
+	/// ```
+	// TODO: This is because of wildcard imports, and it should not be needed once we can avoid
+	// that. Imports like that are needed because we seem to need some unknown types in the macro
+	// expansion. See `sp_session::runtime_api::*;` as one example. All runtime API declarations
+	// should be moved to files similarly.
+	#[allow(ambiguous_glob_reexports)]
+	pub mod apis {
+		// Types often used in the runtime APIs.
+		pub use sp_core::OpaqueMetadata;
+		pub use sp_inherents::{CheckInherentsResult, InherentData};
+		pub use sp_runtime::ApplyExtrinsicResult;
+
+		/// Macro to implement runtime APIs.
+		pub use sp_api::impl_runtime_apis;
+
+		pub use frame_system_rpc_runtime_api::*;
+		pub use sp_api::{self, *};
+		pub use sp_block_builder::*;
+		pub use sp_consensus_aura::*;
+		pub use sp_consensus_grandpa::*;
+		pub use sp_offchain::*;
+		pub use sp_session::runtime_api::*;
+		pub use sp_transaction_pool::runtime_api::*;
+	}
+
+	/// A set of opinionated type aliases commonly used in runtimes.
+	///
+	/// This is one set of opinionated types. They are compatible with one another, but are not
+	/// guaranteed to work if you start tweaking a portion.
+	///
+	/// Some noteworthy opinions in this prelude:
+	///
+	/// - `u32` block number.
+	/// - [`sp_runtime::MultiAddress`] and [`sp_runtime::MultiSignature`] are used as the account id
+	///   and signature types. This implies that this prelude can possibly be used with an
+	///   "account-index" system (e.g. `pallet-indices`). And, in any case, it should be paired with
+	///   `AccountIdLookup` in [`frame_system::Config::Lookup`].
+	pub mod types_common {
+		use frame_system::Config as SysConfig;
+		use sp_runtime::{generic, traits, OpaqueExtrinsic};
+
+		/// A signature type capable of handling multiple crypto-schemes.
+		pub type Signature = sp_runtime::MultiSignature;
+
+		/// The corresponding account-id type of [`Signature`].
+		pub type AccountId =
+			<<Signature as traits::Verify>::Signer as traits::IdentifyAccount>::AccountId;
+
+		/// The block-number type, which should be fed into [`frame_system::Config`].
+		pub type BlockNumber = u32;
+
+		/// TODO: Ideally we want the hashing type to be equal to SysConfig::Hashing?
+		type HeaderInner = generic::Header<BlockNumber, traits::BlakeTwo256>;
+
+		// NOTE: `AccountIndex` is provided for future compatibility, if you want to introduce
+		// something like `pallet-indices`.
+		type ExtrinsicInner<T, Extra, AccountIndex = ()> = generic::UncheckedExtrinsic<
+			sp_runtime::MultiAddress<AccountId, AccountIndex>,
+			<T as SysConfig>::RuntimeCall,
+			Signature,
+			Extra,
+		>;
+
+		/// The block type, which should be fed into [`frame_system::Config`].
+		///
+		/// Should be parameterized with `T: frame_system::Config` and a tuple of
+		/// `SignedExtension`. When in doubt, use [`SystemSignedExtensionsOf`].
+		// Note that this cannot be dependent on `T` for block-number because it would lead to a
+		// circular dependency (self-referential generics).
+		pub type BlockOf<T, Extra = SystemSignedExtensionsOf<T>> =
+			generic::Block<HeaderInner, ExtrinsicInner<T, Extra>>;
+
+		/// The opaque block type. This is the same as [`BlockOf`], but it has
+		/// [`sp_runtime::OpaqueExtrinsic`] as its final extrinsic type.
+		///
+		/// This should be provided to the client side as the extrinsic type.
+		pub type OpaqueBlock = generic::Block<HeaderInner, OpaqueExtrinsic>;
+
+		/// Default set of signed extensions exposed from the `frame_system`.
+		///
+		/// Crucially, this does NOT contain any tx-payment extension.
+		pub type SystemSignedExtensionsOf<T> = (
+			frame_system::CheckNonZeroSender<T>,
+			frame_system::CheckSpecVersion<T>,
+			frame_system::CheckTxVersion<T>,
+			frame_system::CheckGenesis<T>,
+			frame_system::CheckEra<T>,
+			frame_system::CheckNonce<T>,
+			frame_system::CheckWeight<T>,
+		);
+	}
+
+	/// The main prelude of FRAME for building runtimes, and in the context of testing.
+	///
+	/// Counterpart of `runtime::prelude`.
+	#[cfg(feature = "std")]
+	pub mod testing_prelude {
+		pub use super::prelude::*;
+		pub use sp_core::storage::Storage;
+		pub use sp_runtime::BuildStorage;
+	}
+}
+
+/// All traits often used in FRAME pallets.
+///
+/// Note that types implementing these traits can also be found in this module.
+// TODO: `Hash` and `Bounded` are defined multiple times; should be fixed once these two crates are
+// cleaned up.
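Before moving on to the trait re-exports below, a usage sketch of `types_common`: a runtime built on these aliases might wire them up as follows. `Runtime` and `SignedExtra` are assumed to come from the consuming runtime itself, as in the minimal runtime earlier in this patch.

// Sketch only: consuming `types_common` from a downstream runtime.
use frame::runtime::types_common::{BlockOf, OpaqueBlock, SystemSignedExtensionsOf};

// The concrete block type fed into `frame_system::Config::Block`.
type Block = BlockOf<Runtime, SystemSignedExtensionsOf<Runtime>>;

// The opaque counterpart, handed to the node/client side as the extrinsic type.
pub type NodeBlock = OpaqueBlock;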
+#[allow(ambiguous_glob_reexports)] +pub mod traits { + pub use frame_support::traits::*; + pub use sp_runtime::traits::*; +} + +/// The arithmetic types used for safe math. +pub mod arithmetic { + pub use sp_arithmetic::{traits::*, *}; +} + +/// Low level primitive types used in FRAME pallets. +pub mod primitives { + pub use sp_core::{H160, H256, H512, U256, U512}; + pub use sp_runtime::traits::{BlakeTwo256, Hash, Keccak256}; +} + +/// All derive macros used in frame. +/// +/// This is already part of the [`prelude`]. +pub mod derive { + pub use frame_support::{ + CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, + RuntimeDebugNoBound, + }; + pub use parity_scale_codec::{Decode, Encode}; + pub use scale_info::TypeInfo; + pub use sp_runtime::RuntimeDebug; + pub use sp_std::fmt::Debug; +} + +/// Access to all of the dependencies of this crate. In case the re-exports are not enough, this +/// module can be used. +/// +/// Any time one uses this module to access a dependency, you can have a moment to think about +/// whether this item could have been placed in any of the other modules and preludes in this crate. +/// In most cases, hopefully the answer is yes. +pub mod deps { + // TODO: It would be great to somehow instruct RA to prefer *not* suggesting auto-imports from + // these. For example, we prefer `frame::derive::CloneNoBound` rather than + // `frame::deps::frame_support::CloneNoBound`. + pub use frame_support; + pub use frame_system; + + pub use sp_arithmetic; + pub use sp_core; + pub use sp_io; + pub use sp_runtime; + pub use sp_std; + + pub use parity_scale_codec as codec; + pub use scale_info; + + #[cfg(feature = "runtime")] + pub use frame_executive; + #[cfg(feature = "runtime")] + pub use sp_api; + #[cfg(feature = "runtime")] + pub use sp_block_builder; + #[cfg(feature = "runtime")] + pub use sp_consensus_aura; + #[cfg(feature = "runtime")] + pub use sp_consensus_grandpa; + #[cfg(feature = "runtime")] + pub use sp_inherents; + #[cfg(feature = "runtime")] + pub use sp_offchain; + #[cfg(feature = "runtime")] + pub use sp_version; +} diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index fb29c0da42a9..36de44d9d729 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -148,12 +148,34 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - #[pallet::config] + /// Default preludes for [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::derive_impl; + + /// Default prelude sensible to be used in a testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type WeightInfo = (); + #[inject_runtime_type] + type RuntimeEvent = (); + #[inject_runtime_type] + type RuntimeCall = (); + } + } + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// A sudo-able call. 
+ #[pallet::no_default_bounds] type RuntimeCall: Parameter + UnfilteredDispatchable + GetDispatchInfo; diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index 9e78e474f4e5..427bda6d99e4 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -19,8 +19,8 @@ use super::*; use crate as sudo; -use frame_support::traits::{ConstU32, ConstU64, Contains}; -use sp_core::H256; +use frame_support::traits::{ConstU32, Contains}; +use sp_core::{ConstU64, H256}; use sp_io; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index f54252ff9d61..1c696bbb84ac 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -802,16 +802,20 @@ pub mod testing_prelude { /// Prelude to be used alongside pallet macro, for ease of use. pub mod pallet_prelude { pub use crate::{ + defensive, defensive_assert, dispatch::{DispatchClass, DispatchResult, DispatchResultWithPostInfo, Parameter, Pays}, ensure, inherent::{InherentData, InherentIdentifier, ProvideInherent}, storage, storage::{ + bounded_btree_map::BoundedBTreeMap, + bounded_btree_set::BoundedBTreeSet, bounded_vec::BoundedVec, types::{ CountedStorageMap, CountedStorageNMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap, StorageMap, StorageNMap, StorageValue, ValueQuery, }, + weak_bounded_vec::WeakBoundedVec, StorageList, }, traits::{ diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 64c6147dd1f1..0f9617c03687 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame = { path = "frame", default-features = false} +frame = { path = "../../..", default-features = false, features = ["runtime", "experimental"]} scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } [features] diff --git a/substrate/frame/support/test/stg_frame_crate/frame/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/frame/Cargo.toml deleted file mode 100644 index d99ac2d2d46e..000000000000 --- a/substrate/frame/support/test/stg_frame_crate/frame/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "frame" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -publish = false -homepage = "https://substrate.io" -repository.workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -frame-system = { path = "../../../../system", default-features = false} -frame-support = { path = "../../..", default-features = false} - -[features] -default = [ "std" ] -std = [ "frame-support/std", "frame-system/std" ] diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs new file mode 100644 index 000000000000..573ceb6dfab7 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame::deps::frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::{Hooks, IsType}; + + #[pallet::config] + pub trait Config: frame::deps::frame_system::Config { + type Bar: Clone + std::fmt::Debug + Eq; + type RuntimeEvent: IsType<::RuntimeEvent> + + From>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr new file mode 100644 index 000000000000..0f805c972e4d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr @@ -0,0 +1,5 @@ +error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `IsType<::RuntimeEvent>` + --> tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs:26:3 + | +26 | type RuntimeEvent: IsType<::RuntimeEvent> + | ^^^^ diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 908d8092eef5..f7733e312c3b 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -52,6 +52,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "sp-runtime/try-runtime" ] +experimental = [] [[bench]] name = "bench" diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 897d3bd7ce91..eca20f5a0a9f 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -244,6 +244,53 @@ pub mod pallet { type BlockHashCount = frame_support::traits::ConstU64<10>; type OnSetCode = (); } + + /// Default configurations of this pallet in a solo-chain environment. + /// + /// ## Considerations: + /// + /// By default, this type makes the following choices: + /// + /// * Use a normal 32-byte account id, with a [`DefaultConfig::Lookup`] that implies no + /// 'account-indexing' pallet is being used. + /// * Given that we don't know anything about the existence of a currency system in scope, + /// a [`DefaultConfig::AccountData`] is chosen that has no additional data. Overwrite this + /// if you use `pallet-balances` or similar. + /// * Make sure to overwrite [`DefaultConfig::Version`]. + /// * A 2s block time and a default 5 MB block size are used.
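Concretely, the considerations above mean a downstream runtime can adopt these defaults via `derive_impl` and override only what it must. A hedged sketch of such usage — `Runtime`, `Block`, and `Version` are placeholder names from a hypothetical runtime, not items defined here:

```rust
#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Runtime {
	// No sensible default exists for the block type, so it must be given.
	type Block = Block;
	// As the considerations note, `Version` defaults to `()` and should be overwritten.
	type Version = Version;
}
```
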
+ #[cfg(feature = "experimental")] + pub struct SolochainDefaultConfig; + + #[cfg(feature = "experimental")] + #[frame_support::register_default_impl(SolochainDefaultConfig)] + impl DefaultConfig for SolochainDefaultConfig { + type Nonce = u32; + type Hash = sp_core::hash::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = sp_runtime::AccountId32; + type Lookup = sp_runtime::traits::AccountIdLookup; + type MaxConsumers = frame_support::traits::ConstU32<128>; + type AccountData = crate::AccountInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type Version = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + #[inject_runtime_type] + type RuntimeEvent = (); + #[inject_runtime_type] + type RuntimeOrigin = (); + #[inject_runtime_type] + type RuntimeCall = (); + #[inject_runtime_type] + type PalletInfo = (); + type BaseCallFilter = frame_support::traits::Everything; + type BlockHashCount = frame_support::traits::ConstU32<256>; + type OnSetCode = (); + } } /// System configuration trait. Implemented by runtime. @@ -1817,4 +1864,11 @@ pub mod pallet_prelude { /// Type alias for the `BlockNumber` associated type of system config. pub type BlockNumberFor = as sp_runtime::traits::Header>::Number; + + /// Type alias for the `Extrinsic` associated type of system config. + pub type ExtrinsicFor = + <::Block as sp_runtime::traits::Block>::Extrinsic; + + /// Type alias for the `RuntimeCall` associated type of system config. + pub type RuntimeCallFor = ::RuntimeCall; } diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index ad055bab004f..a62ac6d633d0 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -31,7 +31,7 @@ //! //! See the [`pallet`] module for more information about the interfaces this pallet exposes, //! including its configuration trait, dispatchables, storage items, events and errors. -//! +//! //! ## Overview //! //! The Timestamp pallet is designed to create a consensus-based time source. This helps ensure that @@ -144,12 +144,33 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{derive_impl, pallet_prelude::*}; use frame_system::pallet_prelude::*; - #[pallet::config] + /// Default preludes for [`Config`]. + pub mod config_preludes { + use super::*; + + /// Default prelude sensible to be used in a testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = frame_support::traits::ConstU64<1>; + type WeightInfo = (); + } + } + + /// The pallet configuration trait + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// Type used for expressing a timestamp. 
+ #[pallet::no_default_bounds] type Moment: Parameter + Default + AtLeast32Bit diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 8160d72ad894..efadfd60bdd3 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -319,9 +319,29 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - #[pallet::config] + pub mod config_preludes { + use super::*; + use frame_support::derive_impl; + + /// Default prelude sensible to be used in a testing environment. + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type FeeMultiplierUpdate = (); + type OperationalFeeMultiplier = (); + } + } + + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for withdrawing, refunding and depositing the transaction fee. @@ -330,12 +350,24 @@ pub mod pallet { /// adjusted, depending on the used resources by the transaction. If the /// transaction weight is lower than expected, parts of the transaction fee /// might be refunded. In the end the fees can be deposited. + #[pallet::no_default] type OnChargeTransaction: OnChargeTransaction; - /// A fee mulitplier for `Operational` extrinsics to compute "virtual tip" to boost their + /// Convert a weight value into a deductible fee based on the currency type. + #[pallet::no_default] + type WeightToFee: WeightToFee>; + + /// Convert a length value into a deductible fee based on the currency type. + #[pallet::no_default] + type LengthToFee: WeightToFee>; + + /// Update the multiplier of the next block, based on the previous block's weight. + type FeeMultiplierUpdate: MultiplierUpdate; + + /// A fee multiplier for `Operational` extrinsics to compute "virtual tip" to boost their /// `priority` /// - /// This value is multipled by the `final_fee` to obtain a "virtual tip" that is later + /// This value is multiplied by the `final_fee` to obtain a "virtual tip" that is later /// added to a tip component in regular `priority` calculations. /// It means that a `Normal` transaction can front-run a similarly-sized `Operational` /// extrinsic (with no tip), by including a tip value greater than the virtual tip. @@ -355,15 +387,6 @@ pub mod pallet { /// transactions. #[pallet::constant] type OperationalFeeMultiplier: Get; - - /// Convert a weight value into a deductible fee based on the currency type. - type WeightToFee: WeightToFee>; - - /// Convert a length value into a deductible fee based on the currency type. - type LengthToFee: WeightToFee>; - - /// Update the multiplier of the next block, based on the previous block's weight. - type FeeMultiplierUpdate: MultiplierUpdate; } #[pallet::type_value] diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index c9389154bbf4..e261b162b5aa 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -15,21 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use crate::common::API_VERSION_ATTRIBUTE; +use inflector::Inflector; use proc_macro2::{Span, TokenStream}; - +use proc_macro_crate::{crate_name, FoundCrate}; +use quote::{format_ident, quote, ToTokens}; use syn::{ parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident, ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; -use quote::{format_ident, quote}; - -use proc_macro_crate::{crate_name, FoundCrate}; - -use crate::common::API_VERSION_ATTRIBUTE; - -use inflector::Inflector; - /// Generates the access to the `sc_client` crate. pub fn generate_crate_access() -> TokenStream { match crate_name("sp-api") { @@ -38,10 +33,15 @@ pub fn generate_crate_access() -> TokenStream { let renamed_name = Ident::new(&renamed_name, Span::call_site()); quote!(#renamed_name) }, - Err(e) => { - let err = Error::new(Span::call_site(), e).to_compile_error(); - quote!( #err ) - }, + Err(e) => + if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { + let path = format!("{}::deps::{}", name, "sp_api"); + let path = syn::parse_str::(&path).expect("is a valid path; qed"); + quote!( #path ) + } else { + let err = Error::new(Span::call_site(), e).to_compile_error(); + quote!( #err ) + }, } } @@ -261,8 +261,6 @@ pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { /// Extract the documentation from the provided attributes. #[cfg(feature = "frame-metadata")] pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { - use quote::ToTokens; - attrs .iter() .filter_map(|attr| { diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index ccb61c12f321..f7e2c56ca9a4 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -942,8 +942,7 @@ pub trait Pair: CryptoType + Sized { /// - the path may be followed by `///`, in which case everything after the `///` is treated /// as a password. /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` - /// and - /// interpreted as above. + /// and interpreted as above. /// /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as /// integers, non-numeric items as strings. 
Junctions prefixed with `/` are interpreted as soft diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs index c3ad86409e90..3060bfb1ad98 100644 --- a/substrate/primitives/keyring/src/ed25519.rs +++ b/substrate/primitives/keyring/src/ed25519.rs @@ -35,6 +35,12 @@ pub enum Keyring { Dave, Eve, Ferdie, + AliceStash, + BobStash, + CharlieStash, + DaveStash, + EveStash, + FerdieStash, One, Two, } @@ -104,6 +110,12 @@ impl From for &'static str { Keyring::Dave => "Dave", Keyring::Eve => "Eve", Keyring::Ferdie => "Ferdie", + Keyring::AliceStash => "Alice//stash", + Keyring::BobStash => "Bob//stash", + Keyring::CharlieStash => "Charlie//stash", + Keyring::DaveStash => "Dave//stash", + Keyring::EveStash => "Eve//stash", + Keyring::FerdieStash => "Ferdie//stash", Keyring::One => "One", Keyring::Two => "Two", } diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs index 1db18f7edbdc..ee7fd56ba11b 100644 --- a/substrate/primitives/keyring/src/lib.rs +++ b/substrate/primitives/keyring/src/lib.rs @@ -27,9 +27,8 @@ pub mod ed25519; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; -/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, -/// since it tends to be used for accounts (although it may also be used -/// by authorities). +/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, since it tends to be +/// used for accounts (although it may also be used by authorities). pub use sr25519::Keyring as AccountKeyring; #[cfg(feature = "bandersnatch-experimental")] diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs index c738cfdc59d9..914a66b4d837 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -35,6 +35,12 @@ pub enum Keyring { Dave, Eve, Ferdie, + AliceStash, + BobStash, + CharlieStash, + DaveStash, + EveStash, + FerdieStash, One, Two, } @@ -114,6 +120,12 @@ impl From for &'static str { Keyring::Dave => "Dave", Keyring::Eve => "Eve", Keyring::Ferdie => "Ferdie", + Keyring::AliceStash => "Alice//stash", + Keyring::BobStash => "Bob//stash", + Keyring::CharlieStash => "Charlie//stash", + Keyring::DaveStash => "Dave//stash", + Keyring::EveStash => "Eve//stash", + Keyring::FerdieStash => "Ferdie//stash", Keyring::One => "One", Keyring::Two => "Two", } diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 0b1cd2b54290..1cdc0b8e4051 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -134,16 +134,16 @@ impl Checkable - for UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where - Address: Member + MaybeDisplay, + LookupSource: Member + MaybeDisplay, Call: Encode + Member, Signature: Member + traits::Verify, ::Signer: IdentifyAccount, Extra: SignedExtension, AccountId: Member + MaybeDisplay, - Lookup: traits::Lookup, + Lookup: traits::Lookup, { type Checked = CheckedExtrinsic; diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index dd861ad05de9..0e1d4c31fd71 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -466,7 +466,7 @@ impl Verify for MultiSignature { } /// Signature verify that can work with any known signature types.. 
-#[derive(Eq, PartialEq, Clone, Default, Encode, Decode, RuntimeDebug)] +#[derive(Eq, PartialEq, Clone, Default, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct AnySignature(H512); diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index 17dc7ce50ea8..ec79f43cabdc 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -226,7 +226,7 @@ pub trait StaticLookup { } /// A lookup implementation returning the input value. -#[derive(Default)] +#[derive(Default, Clone, Copy, PartialEq, Eq)] pub struct IdentityLookup(PhantomData); impl StaticLookup for IdentityLookup { type Source = T; @@ -1666,8 +1666,6 @@ impl SignedExtension for Tuple { } } -/// Only for bare bone testing when you don't care about signed extensions at all. -#[cfg(feature = "std")] impl SignedExtension for () { type AccountId = u64; type AdditionalSigned = (); diff --git a/substrate/primitives/session/src/lib.rs b/substrate/primitives/session/src/lib.rs index 45395e9766f5..9933495fd601 100644 --- a/substrate/primitives/session/src/lib.rs +++ b/substrate/primitives/session/src/lib.rs @@ -26,28 +26,12 @@ use sp_api::ProvideRuntimeApi; #[cfg(feature = "std")] use sp_runtime::traits::Block as BlockT; -use sp_core::{crypto::KeyTypeId, RuntimeDebug}; +use sp_core::RuntimeDebug; use sp_staking::SessionIndex; use sp_std::vec::Vec; -sp_api::decl_runtime_apis! { - /// Session keys runtime api. - pub trait SessionKeys { - /// Generate a set of session keys with optionally using the given seed. - /// The keys should be stored within the keystore exposed via runtime - /// externalities. - /// - /// The seed needs to be a valid `utf8` string. - /// - /// Returns the concatenated SCALE encoded public keys. - fn generate_session_keys(seed: Option>) -> Vec; - - /// Decode the given public session keys. - /// - /// Returns the list of public raw public keys + key type. - fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>>; - } -} +pub mod runtime_api; +pub use runtime_api::*; /// Number of validators in a given session. pub type ValidatorCount = u32; diff --git a/substrate/primitives/session/src/runtime_api.rs b/substrate/primitives/session/src/runtime_api.rs new file mode 100644 index 000000000000..5e508cd3dbd3 --- /dev/null +++ b/substrate/primitives/session/src/runtime_api.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use sp_core::crypto::KeyTypeId; +use sp_std::prelude::*; + +sp_api::decl_runtime_apis! { + /// Session keys runtime api. + pub trait SessionKeys { + /// Generate a set of session keys with optionally using the given seed. + /// The keys should be stored within the keystore exposed via runtime + /// externalities. + /// + /// The seed needs to be a valid `utf8` string. 
+ /// + /// Returns the concatenated SCALE encoded public keys. + fn generate_session_keys(seed: Option>) -> Vec; + + /// Decode the given public session keys. + /// + /// Returns the list of raw public keys + key type. + fn decode_session_keys(encoded: Vec) -> Option, sp_core::crypto::KeyTypeId)>>; + } +} diff --git a/substrate/primitives/weights/src/lib.rs index 36cf864dd538..ececb622fa0f 100644 --- a/substrate/primitives/weights/src/lib.rs +++ b/substrate/primitives/weights/src/lib.rs @@ -248,7 +248,25 @@ where } } +/// Implementor of [`WeightToFee`] such that it maps any unit of weight to a fixed fee. +pub struct FixedFee(sp_std::marker::PhantomData); + +impl WeightToFee for FixedFee +where + T: BaseArithmetic + From + Copy + Unsigned, +{ + type Balance = T; + + fn weight_to_fee(_: &Weight) -> Self::Balance { + F.into() + } +} + +/// An implementation of [`WeightToFee`] that collects no fee. +pub type NoFee = FixedFee<0, T>; + /// Implementor of [`WeightToFee`] that uses a constant multiplier. +/// /// # Example /// /// ``` diff --git a/substrate/src/lib.rs index 409515ea505b..be9eef9952f8 100644 --- a/substrate/src/lib.rs +++ b/substrate/src/lib.rs @@ -55,9 +55,22 @@ //! is merely a matter of the Wasm blob being changed in the chain state, which is, in principle, //! the same as updating an account's balance. //! -//! To learn more about the substrate architecture using some visuals, see [`substrate_diagram`]. +//! ### Architecture //! -//! `FRAME`, Substrate's default runtime development library takes the above even further by +//! Therefore, Substrate can be visualized as follows: +#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_simple.mmd")] +//! +//! The client and the runtime of course need to communicate. This is done through two concepts: +//! +//! 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are +//! defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that +//! allow the runtime to read and write data to the on-chain state. +//! 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined +//! using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic +//! runtime API that any blockchain must implement in order to be able to (re) execute blocks. +#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_client_runtime.mmd")] +//! +//! [`FRAME`], Substrate's default runtime development library, takes the above even further by //! embracing a declarative programming model whereby correctness is enhanced and the system is //! highly configurable through parameterization. //! @@ -68,8 +81,8 @@ //! ## How to Get Started //! //! Most developers want to leave the client side code as-is, and focus on the runtime. To do so, -//! look into the [`frame_support`] crate, which is the entry point crate into runtime development -//! with FRAME. +//! look into the [`frame`] crate, which is the entry point crate into runtime development with +//! FRAME. //! //! > Side note, it is entirely possible to craft a substrate-based runtime without FRAME, an //! > example of which can be found [here](https://github.com/JoshOrndorff/frameless-node-template). //! //! * **Templates**: A number of substrate-based templates exist and they can be used for various //! purposes, with zero to little additional code needed. All of these templates contain runtimes //!
that are highly configurable and are likely suitable for basic needs. -//! * `FRAME`: If need, one can customize that runtime even further, by using `FRAME` and developing -//! custom modules. +//! * [`FRAME`]: If needed, one can customize that runtime even further, by using `FRAME` and +//! developing custom modules. //! * **Core**: To the contrary, some developers may want to customize the client side software to //! achieve novel goals such as a new consensus engine, or a new database backend. While //! Substrate's main configurability is in the runtime, the client is also highly generic and can //! @@ -100,10 +113,9 @@ //! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These //! are the traits that glue the client and runtime together, but are not opinionated about what //! framework is used for building the runtime. Notable examples are [`sp-api`] and [`sp-io`], -//! which form the communication bridge between the client and runtime, as explained in -//! [`substrate_diagram`]. +//! which form the communication bridge between the client and runtime. //! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related -//! to FRAME. See [`frame_support`] for more information. +//! to FRAME. See [`frame`] for more information. //! //! ### Wasm Build //! @@ -157,8 +169,9 @@ //! through which Polkadot can be utilized is by building "parachains", blockchains that are //! connected to Polkadot's shared security. //! -//! To build a parachain, one could use [`Cumulus`](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus), the -//! library on top of Substrate, empowering any substrate-based chain to be a Polkadot parachain. +//! To build a parachain, one could use +//! [`Cumulus`](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus), the library on top +//! of Substrate, empowering any substrate-based chain to be a Polkadot parachain. //! //! ## Where To Go Next? //! @@ -199,6 +212,7 @@ //! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white //! [github]: //! https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [`FRAME`]: ../frame/index.html //! [`sp-io`]: ../sp_io/index.html //! [`sp-api`]: ../sp_api/index.html @@ -222,76 +236,3 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// In this module, we explore substrate at a more depth. First, let's establish substrate being -/// divided into a client and runtime. -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// end -/// subgraph Runtime -/// end -/// end -/// ``` -/// -/// The client and the runtime of course need to communicate. This is done through two concepts: -/// -/// 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are -/// defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that -/// allow the runtime to read and write data to the on-chain state. -/// 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined -/// using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic -/// runtime API that any blockchain must implement in order to be able to (re) execute blocks.
-/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// end -/// subgraph Runtime -/// end -/// Client --runtime-api--> Runtime -/// Runtime --host-functions--> Client -/// end -/// ``` -/// -/// Finally, let's expand the diagram a bit further and look at the internals of each component: -/// -/// ```mermaid -/// graph TB -/// subgraph Substrate -/// direction LR -/// subgraph Client -/// Database -/// Networking -/// Consensus -/// end -/// subgraph Runtime -/// subgraph FRAME -/// direction LR -/// Governance -/// Currency -/// Staking -/// Identity -/// end -/// end -/// Client --runtime-api--> Runtime -/// Runtime --host-functions--> Client -/// end -/// ``` -/// -/// As noted the runtime contains all of the application specific logic of the blockchain. This is -/// usually written with `FRAME`. The client, on the other hand, contains reusable and generic -/// components that are not specific to one single blockchain, such as networking, database, and the -/// consensus engine. -/// -/// [`sp-io`]: ../../sp_io/index.html -/// [`sp-api`]: ../../sp_api/index.html -/// [`sp-io::storage`]: ../../sp_io/storage/index.html -/// [`sp-api::Core`]: ../../sp_api/trait.Core.html -pub mod substrate_diagram {} From 3069b0af398e5a0374802c31201637e97f4a530a Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:50:30 +0300 Subject: [PATCH 08/69] make polkadot die gracefully (#2056) While investigating some db migrations that make the node startup fail, I noticed that the node wasn't exiting and that the log files were growing exponentially, until my whole system was freezing. That made it really hard to actually find why it was failing in the first place. E.g.: ``` ls -lh /tmp/zombie-01a04c2a2c0265d85f6440cf01c0f44a_-51319-uyggzuD4wEpV/bob.log 32,6G oct 27 11:16 /tmp/zombie-01a04c2a2c0265d85f6440cf01c0f44a_-51319-uyggzuD4wEpV/bob.log ``` This was happening because the following errors were being printed continuously without the subsystem main loop exiting: From dispute-coordinator: ``` WARN tokio-runtime-worker parachain::dispute-coordinator: error=Subsystem(Generated(Context("Signal channel is terminated and empty."))) ``` From availability recovery: ``` Erasure task channel closed. Node shutting down ? ``` Signed-off-by: Alexandru Gheorghe --- polkadot/node/core/dispute-coordinator/src/lib.rs | 3 ++- polkadot/node/network/availability-recovery/src/lib.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index 3f0fd4013141..e96fee812409 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -27,6 +27,7 @@ use std::sync::Arc; +use error::FatalError; use futures::FutureExt; use gum::CandidateHash; @@ -431,7 +432,7 @@ impl DisputeCoordinatorSubsystem { #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] async fn wait_for_first_leaf(ctx: &mut Context) -> Result> { loop { - match ctx.recv().await? { + match ctx.recv().await.map_err(FatalError::SubsystemReceive)?
{ FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(None), FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { if let Some(activated) = update.activated { diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index e2146981da92..9acc48ea92e0 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -822,6 +822,7 @@ async fn erasure_task_thread( target: LOG_TARGET, "Erasure task channel closed. Node shutting down ?", ); + break }, } } From 371ac2d798f74a42510e58682a22879235e839c8 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Thu, 26 Oct 2023 18:20:11 +0300 Subject: [PATCH 09/69] pallet-xcm: fix broken reserve_transfer_assets benchmark --- polkadot/xcm/pallet-xcm/Cargo.toml | 7 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 86 +++++++++++++++++++-- polkadot/xcm/pallet-xcm/src/mock.rs | 8 +- 3 files changed, 88 insertions(+), 13 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index eedf3041cb37..67c54f8d9bc4 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -13,7 +13,6 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { version = "1.0.188", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} @@ -24,8 +23,11 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false} xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } +# marked optional, used in benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-assets = { path = "../../../substrate/frame/assets", default-features = false, optional = true } + [dev-dependencies] -pallet-assets = { path = "../../../substrate/frame/assets" } pallet-balances = { path = "../../../substrate/frame/balances" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } @@ -40,6 +42,7 @@ std = [ "frame-support/std", "frame-system/std", "log/std", + "pallet-assets/std", "scale-info/std", "serde", "sp-core/std", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index f567691b5092..4f61d34feeca 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,15 +16,58 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; -use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; -use frame_support::weights::Weight; +use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; +use frame_support::{assert_ok, traits::Currency, weights::Weight}; use frame_system::RawOrigin; +use pallet_assets::Pallet as AssetsPallet; +use sp_runtime::traits::StaticLookup; use sp_std::prelude::*; use 
xcm::{latest::prelude::*, v2}; type RuntimeOrigin = ::RuntimeOrigin; +fn create_default_asset( + asset_id: T::AssetIdParameter, + is_sufficient: bool, + caller: T::AccountId, +) { + let beneficiary = T::Lookup::unlookup(caller); + let root = frame_system::RawOrigin::Root.into(); + assert_ok!(AssetsPallet::::force_create( + root, + asset_id, + beneficiary, + is_sufficient, + 1u32.into(), + )); +} + +fn create_default_minted_asset( + asset_id: T::AssetIdParameter, + is_sufficient: bool, + amount: T::Balance, + caller: T::AccountId, +) { + create_default_asset::(asset_id, is_sufficient, caller.clone()); + if !is_sufficient { + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); + } + let beneficiary = T::Lookup::unlookup(caller.clone()); + assert_ok!(AssetsPallet::::mint( + frame_system::RawOrigin::Signed(caller).into(), + asset_id, + beneficiary, + amount, + )); +} + benchmarks! { + where_clause { + where + T: pallet_assets::Config, + ::AssetIdParameter: From, + ::Balance: From + Into, + } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -62,9 +105,23 @@ benchmarks! { let (assets, destination) = T::ReserveTransferableAssets::get().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let caller: T::AccountId = whitelisted_caller(); + for asset in assets.inner() { + let amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("AssetNotFungible")), + }; + let id = match &asset.id { + Concrete(location) => *location, + _ => return Err(BenchmarkError::Stop("AssetNotFungible")), + }; + let asset_id: T::AssetIdParameter = id.into(); + create_default_minted_asset::(asset_id.clone(), true, amount.into(), caller.clone()); + // verify initial balance + assert_eq!(AssetsPallet::::balance(asset_id.into(), caller.clone()), amount.into()); + } + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) @@ -74,8 +131,23 @@ benchmarks! 
{ let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = assets.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.clone().into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + for asset in assets.inner() { + let amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("AssetNotFungible")), + }; + let id = match &asset.id { + Concrete(location) => *location, + _ => return Err(BenchmarkError::Stop("AssetNotFungible")), + }; + let asset_id: T::AssetIdParameter = id.into(); + // verify balance after transfer + assert_eq!(AssetsPallet::::balance(asset_id.into(), caller.clone()), 0.into()); + } + } execute { let execute_origin = diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 984b359617f2..43974545ec45 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -35,9 +35,9 @@ use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, - DescribeFamily, FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, HashedDescription, - IsConcrete, MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, XcmFeesToAccount, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, HashedDescription, IsConcrete, + MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, XcmFeesToAccount, }; use xcm_executor::{ traits::{Identity, JustTry}, @@ -382,7 +382,7 @@ parameter_types! 
{ pub type SovereignAccountOf = ( ChildParachainConvertsVia, AccountId32Aliases, - HashedDescription>, + HashedDescription, ); pub type ForeignAssetsConvertedConcreteId = MatchedConvertedConcreteId< From 9b58ff0e877f3ec7214183da7cf31d9b5b6625b8 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 12:33:08 +0300 Subject: [PATCH 10/69] try add pallet-assets for benchmarking to rococo --- Cargo.lock | 1 + polkadot/runtime/rococo/Cargo.toml | 4 ++++ polkadot/runtime/rococo/src/lib.rs | 36 ++++++++++++++++++++++++++---- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 843083beb9ae..fd94c08a3a79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14221,6 +14221,7 @@ dependencies = [ "hex-literal", "log", "pallet-asset-rate", + "pallet-assets", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 6d0dee3e4343..337bb4142047 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -90,6 +90,7 @@ pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-feat frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +pallet-assets = { path = "../../../substrate/frame/assets", default-features = false, optional = true } hex-literal = { version = "0.4.1" } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } @@ -134,6 +135,7 @@ std = [ "log/std", "offchain-primitives/std", "pallet-asset-rate/std", + "pallet-assets/std", "pallet-authority-discovery/std", "pallet-authorship/std", "pallet-babe/std", @@ -211,6 +213,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-asset-rate/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-bounties/runtime-benchmarks", @@ -263,6 +266,7 @@ try-runtime = [ "frame-try-runtime", "frame-try-runtime/try-runtime", "pallet-asset-rate/try-runtime", + "pallet-assets/try-runtime", "pallet-authority-discovery/try-runtime", "pallet-authorship/try-runtime", "pallet-babe/try-runtime", diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index e6ad061ce069..439e010f50ff 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -67,14 +67,14 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, EitherOf, EitherOfDiverse, Everything, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, - StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, AsEnsureOriginWithArg, EitherOf, EitherOfDiverse, Everything, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, + ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, }; -use frame_system::EnsureRoot; +use frame_system::{EnsureRoot, EnsureSigned}; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use 
pallet_identity::simple::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; @@ -302,6 +302,30 @@ impl pallet_balances::Config for Runtime { type MaxHolds = ConstU32<2>; } +// only used in benchmarks +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocationForAssetId; + type AssetIdParameter = MultiLocationForAssetId; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ConstU128<1>; + type AssetAccountDeposit = ConstU128<10>; + type MetadataDepositBase = ConstU128<1>; + type MetadataDepositPerByte = ConstU128<1>; + type ApprovalDeposit = ConstU128<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type WeightInfo = (); + type CallbackHandle = (); + type Extra = (); + type RemoveItemsLimit = ConstU32<5>; + type BenchmarkHelper = (); +} + parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; /// This value increases the priority of `Operational` transactions by adding @@ -1359,6 +1383,10 @@ construct_runtime! { // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, + // Assets pallet only used in benchmarks. + #[cfg(feature = "runtime-benchmarks")] + Assets: pallet_assets::{Pallet, Call, Storage, Config, Event} = 100, + ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, From 942977e64fb9918e6c383a22e60ac565818c2dff Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 13:06:36 +0300 Subject: [PATCH 11/69] Revert "try add pallet-assets for benchmarking to rococo" This reverts commit c82330b614df352a0e25a8f18dbd43bc356aed4b. 
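The follow-up patch below takes a different route: rather than coupling `pallet-xcm`'s benchmarks to `pallet-assets`, it constrains them to runtimes that include `pallet-balances` and benchmarks transfers of the native token. Schematically, the mechanism is a `where_clause` inside the `benchmarks!` macro — a sketch of the pattern, not the exact final code:

```rust
benchmarks! {
	where_clause {
		where
			// Only runtimes carrying pallet-balances can run these benchmarks,
			// letting the native token stand in for the transferred asset.
			T: pallet_balances::Config,
	}
	// ... individual benchmarks may now use pallet_balances::Pallet::<T> ...
}
```
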
--- Cargo.lock | 1 - polkadot/runtime/rococo/Cargo.toml | 4 ---- polkadot/runtime/rococo/src/lib.rs | 36 ++++-------------------------- 3 files changed, 4 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd94c08a3a79..843083beb9ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14221,7 +14221,6 @@ dependencies = [ "hex-literal", "log", "pallet-asset-rate", - "pallet-assets", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 337bb4142047..6d0dee3e4343 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -90,7 +90,6 @@ pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-feat frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -pallet-assets = { path = "../../../substrate/frame/assets", default-features = false, optional = true } hex-literal = { version = "0.4.1" } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } @@ -135,7 +134,6 @@ std = [ "log/std", "offchain-primitives/std", "pallet-asset-rate/std", - "pallet-assets/std", "pallet-authority-discovery/std", "pallet-authorship/std", "pallet-babe/std", @@ -213,7 +211,6 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-asset-rate/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-bounties/runtime-benchmarks", @@ -266,7 +263,6 @@ try-runtime = [ "frame-try-runtime", "frame-try-runtime/try-runtime", "pallet-asset-rate/try-runtime", - "pallet-assets/try-runtime", "pallet-authority-discovery/try-runtime", "pallet-authorship/try-runtime", "pallet-babe/try-runtime", diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 439e010f50ff..e6ad061ce069 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -67,14 +67,14 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, AsEnsureOriginWithArg, EitherOf, EitherOfDiverse, Everything, - InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, - ProcessMessageError, StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, EitherOf, EitherOfDiverse, Everything, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, + StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, }; -use frame_system::{EnsureRoot, EnsureSigned}; +use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::simple::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; @@ -302,30 +302,6 @@ impl pallet_balances::Config for Runtime { type MaxHolds = ConstU32<2>; } -// only used in benchmarks -#[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = MultiLocationForAssetId; - type 
AssetIdParameter = MultiLocationForAssetId; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = EnsureRoot; - type AssetDeposit = ConstU128<1>; - type AssetAccountDeposit = ConstU128<10>; - type MetadataDepositBase = ConstU128<1>; - type MetadataDepositPerByte = ConstU128<1>; - type ApprovalDeposit = ConstU128<1>; - type StringLimit = ConstU32<50>; - type Freezer = (); - type WeightInfo = (); - type CallbackHandle = (); - type Extra = (); - type RemoveItemsLimit = ConstU32<5>; - type BenchmarkHelper = (); -} - parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; /// This value increases the priority of `Operational` transactions by adding @@ -1383,10 +1359,6 @@ construct_runtime! { // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, - // Assets pallet only used in benchmarks. - #[cfg(feature = "runtime-benchmarks")] - Assets: pallet_assets::{Pallet, Call, Storage, Config, Event} = 100, - ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, From 3b9965b2b6b99d44fef881dfe638f4e07d1f04ef Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 13:07:20 +0300 Subject: [PATCH 12/69] pallet-xcm benchmarking: most chains do not have pallet-assets, use pallet-balances instead --- polkadot/xcm/pallet-xcm/Cargo.toml | 6 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 95 ++++++--------------- polkadot/xcm/pallet-xcm/src/mock.rs | 7 +- 3 files changed, 36 insertions(+), 72 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 67c54f8d9bc4..2005fc0375ef 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -25,10 +25,10 @@ xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", def # marked optional, used in benchmarking frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-assets = { path = "../../../substrate/frame/assets", default-features = false, optional = true } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-assets = { path = "../../../substrate/frame/assets" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } @@ -42,7 +42,7 @@ std = [ "frame-support/std", "frame-system/std", "log/std", - "pallet-assets/std", + "pallet-balances/std", "scale-info/std", "serde", "sp-core/std", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 4f61d34feeca..c44307f9891d 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -17,56 +17,21 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; -use frame_support::{assert_ok, traits::Currency, weights::Weight}; +use frame_support::{traits::Currency, weights::Weight}; use frame_system::RawOrigin; -use pallet_assets::Pallet as AssetsPallet; -use sp_runtime::traits::StaticLookup; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; type RuntimeOrigin = 
::RuntimeOrigin; -fn create_default_asset( - asset_id: T::AssetIdParameter, - is_sufficient: bool, - caller: T::AccountId, -) { - let beneficiary = T::Lookup::unlookup(caller); - let root = frame_system::RawOrigin::Root.into(); - assert_ok!(AssetsPallet::::force_create( - root, - asset_id, - beneficiary, - is_sufficient, - 1u32.into(), - )); -} - -fn create_default_minted_asset( - asset_id: T::AssetIdParameter, - is_sufficient: bool, - amount: T::Balance, - caller: T::AccountId, -) { - create_default_asset::(asset_id, is_sufficient, caller.clone()); - if !is_sufficient { - T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); - } - let beneficiary = T::Lookup::unlookup(caller.clone()); - assert_ok!(AssetsPallet::::mint( - frame_system::RawOrigin::Signed(caller).into(), - asset_id, - beneficiary, - amount, - )); -} +// existential deposit multiplier +const ED_MULTIPLIER: u32 = 10; benchmarks! { where_clause { where - T: pallet_assets::Config, - ::AssetIdParameter: From, - ::Balance: From + Into, + T: pallet_balances::Config, + ::Balance: From + Into, } send { let send_origin = @@ -105,21 +70,28 @@ benchmarks! { let (assets, destination) = T::ReserveTransferableAssets::get().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let caller: T::AccountId = whitelisted_caller(); - for asset in assets.inner() { - let amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("AssetNotFungible")), - }; - let id = match &asset.id { - Concrete(location) => *location, - _ => return Err(BenchmarkError::Stop("AssetNotFungible")), - }; - let asset_id: T::AssetIdParameter = id.into(); - create_default_minted_asset::(asset_id.clone(), true, amount.into(), caller.clone()); - // verify initial balance - assert_eq!(AssetsPallet::::balance(asset_id.into(), caller.clone()), amount.into()); + + // most chains deploying `pallet-xcm` don't have `pallet-assets` so we're + // stuck with using native token and `pallet-balances`. + if assets.len() != 1 { + return Err(BenchmarkError::Stop("Generic benchmark supports only single native asset")) } + let asset = assets.inner().clone().pop().unwrap(); + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; @@ -134,19 +106,8 @@ benchmarks! 
{ let versioned_assets: VersionedMultiAssets = assets.clone().into(); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { - for asset in assets.inner() { - let amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("AssetNotFungible")), - }; - let id = match &asset.id { - Concrete(location) => *location, - _ => return Err(BenchmarkError::Stop("AssetNotFungible")), - }; - let asset_id: T::AssetIdParameter = id.into(); - // verify balance after transfer - assert_eq!(AssetsPallet::::balance(asset_id.into(), caller.clone()), 0.into()); - } + // verify balance after transfer + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance - transferred_amount); } execute { diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 43974545ec45..7afa9f581f44 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -489,8 +489,11 @@ parameter_types! { UsdtTeleportLocation::get(), )); pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - vec![ForeignAsset::get()].into(), - ForeignReserveLocation::get(), + MultiAsset { + fun: Fungible(10), + id: Concrete(Here.into_location()), + }.into(), + Parachain(OTHER_PARA_ID).into(), )); } From adcd6889b33ac7597bf9aa3ec9e22fec94294589 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 14:38:10 +0300 Subject: [PATCH 13/69] pallet-xcm: fix teleport_assets benchmark --- polkadot/runtime/rococo/src/xcm_config.rs | 4 +-- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 39 +++++++++++++++++---- polkadot/xcm/pallet-xcm/src/mock.rs | 31 ++++++++++++---- 3 files changed, 58 insertions(+), 16 deletions(-) diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 65661b3e9334..da0aabfbaea5 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -212,12 +212,12 @@ parameter_types! { pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); // Relay/native token can be teleported to/from AH. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), AssetHub::get(), )); // We can reserve transfer native token to some random parachain. pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), Parachain(4321).into(), )); } diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index c44307f9891d..b0a37db6a783 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -51,9 +51,30 @@ benchmarks! { let (assets, destination) = T::TeleportableAssets::get().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + + // most chains deploying `pallet-xcm` don't have `pallet-assets` so we're + // stuck with using native token and `pallet-balances`. 
+ if assets.len() != 1 { + return Err(BenchmarkError::Stop("Generic benchmark supports only single native asset")) + } + let asset = assets.inner().clone().pop().unwrap(); + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) @@ -64,7 +85,11 @@ benchmarks! { let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedMultiAssets = assets.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } reserve_transfer_assets { let (assets, destination) = T::ReserveTransferableAssets::get().ok_or( @@ -103,11 +128,11 @@ benchmarks! { let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = assets.clone().into(); + let versioned_assets: VersionedMultiAssets = assets.into(); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { - // verify balance after transfer - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance - transferred_amount); + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); } execute { diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 7afa9f581f44..e421d46f206b 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -320,6 +320,9 @@ impl pallet_assets::Config for Test { type BenchmarkHelper = XcmBenchmarkHelper; } +// This child parachain is a system parachain trusted to teleport native token. +pub const SOME_SYSTEM_PARA: u32 = 1001; + // This child parachain acts as trusted reserve for its assets in tests. // USDT allowed to teleport to/from here. pub const FOREIGN_ASSET_RESERVE_PARA_ID: u32 = 2001; @@ -340,6 +343,14 @@ pub const OTHER_PARA_ID: u32 = 2009; parameter_types! 
{ pub const RelayLocation: MultiLocation = Here.into_location(); + pub const NativeAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(Here.into_location()), + }; + pub const SystemParachainLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(SOME_SYSTEM_PARA)) + }; pub const ForeignReserveLocation: MultiLocation = MultiLocation { parents: 0, interior: X1(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)) @@ -417,10 +428,11 @@ parameter_types! { pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); - pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (vec![Usdt::get()].into(), UsdtTeleportLocation::get()); - pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (vec![Usdt::get()].into(), ForeignReserveLocation::get()); - pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (vec![ForeignAsset::get()].into(), ForeignReserveLocation::get()); - pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (vec![Usdc::get()].into(), UsdcReserveLocation::get()); + pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); + pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); + pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; pub XcmFeesTargetAccount: AccountId = AccountId::new([167u8; 32]); @@ -449,7 +461,12 @@ impl xcm_executor::Config for XcmConfig { type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; type IsReserve = (Case, Case); - type IsTeleporter = (Case, Case, Case); + type IsTeleporter = ( + Case, + Case, + Case, + Case, + ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -485,8 +502,8 @@ parameter_types! { parameter_types! 
{ pub ReachableDest: Option = Some(Parachain(1000).into()); pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - vec![Usdt::get()].into(), - UsdtTeleportLocation::get(), + NativeAsset::get().into(), + SystemParachainLocation::get(), )); pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( MultiAsset { From eca0c3ef19cb4e5bb6651eeee069705009675f0c Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 14:54:18 +0300 Subject: [PATCH 14/69] fix runtimes benchmarks for pallet-xcm --- cumulus/parachain-template/runtime/src/xcm_config.rs | 7 ++----- .../assets/asset-hub-kusama/src/xcm_config.rs | 8 ++++---- .../assets/asset-hub-polkadot/src/xcm_config.rs | 8 ++++---- .../assets/asset-hub-rococo/src/xcm_config.rs | 8 ++++---- .../assets/asset-hub-westend/src/xcm_config.rs | 8 ++++---- .../bridge-hubs/bridge-hub-kusama/src/xcm_config.rs | 2 +- .../bridge-hub-polkadot/src/xcm_config.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/xcm_config.rs | 2 +- .../collectives-polkadot/src/xcm_config.rs | 2 +- .../contracts/contracts-rococo/src/xcm_config.rs | 9 +++------ .../runtimes/testing/penpal/src/xcm_config.rs | 2 +- .../runtimes/testing/rococo-parachain/src/lib.rs | 12 ++---------- polkadot/runtime/rococo/src/xcm_config.rs | 2 +- polkadot/runtime/westend/src/xcm_config.rs | 6 +++--- 14 files changed, 32 insertions(+), 46 deletions(-) diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/cumulus/parachain-template/runtime/src/xcm_config.rs index 0b82dba25f28..9f7de9895fae 100644 --- a/cumulus/parachain-template/runtime/src/xcm_config.rs +++ b/cumulus/parachain-template/runtime/src/xcm_config.rs @@ -155,11 +155,8 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Teleports are disabled pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - // We can reserve transfer relay/native token between us and Relay. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); + // Reserve transfers are disabled. + pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; } impl pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index 5cd821fe9aa8..ea28ec91832a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -557,13 +557,13 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between AH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); - // We can reserve transfer some local token to Relay. + // We can reserve transfer native token to some random parachain. 
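+ // (amounts use `EXISTENTIAL_DEPOSIT` rather than a hardcoded `10` so the transferred + // amount stays within the `ED_MULTIPLIER` * existential-deposit budget that the + // benchmark gives the caller)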
pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), - Parent.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), + Parachain(43211234).into(), )); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs index d23e209adcef..71dc71f477e1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs @@ -481,13 +481,13 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between AH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); - // We can reserve transfer some local token to Relay. + // We can reserve transfer native token to some random parachain. pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), - Parent.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), + Parachain(43211234).into(), )); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 8127dbfb87f7..441589aa8641 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -660,13 +660,13 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between AH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); - // We can reserve transfer some local token to Relay. + // We can reserve transfer native token to some random parachain. pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), - Parent.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), + Parachain(43211234).into(), )); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 08d27d7de573..dc4e4372816d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -590,13 +590,13 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between AH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); - // We can reserve transfer some local token to Relay. + // We can reserve transfer native token to some random parachain. 
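+ // (`43211234` is an arbitrary, presumably unoccupied para id that only serves as a + // benchmark destination)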
pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), - Parent.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), + Parachain(43211234).into(), )); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs index 99b00c641e27..e65f346137bf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs @@ -246,7 +246,7 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between BH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); // Reserve transfers are disabled on BH. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs index c4827268362c..1a3d4a938a97 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs @@ -250,7 +250,7 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between BH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); // Reserve transfers are disabled on BH. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 03909a68f7dc..6646e5edf94f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -319,7 +319,7 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between BH and Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); // Reserve transfers are disabled on BH. diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs index b6496ddc5056..1f673a1ee29b 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs @@ -298,7 +298,7 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported between BH and Relay. 
pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); // Reserve transfers are disabled on Collectives. diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index d02bcff4150c..fc55fc6a4cb4 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -228,14 +228,11 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported to Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); - // Act as reserve for native token when sending to random parachain. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), - Parachain(4321).into(), - )); + // Reserve transfers are disabled on Contracts. + pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; } impl pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 289f920253b6..a17f8e5b99ac 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -317,7 +317,7 @@ parameter_types! { pub ReachableDest: Option = Some(Parent.into()); // Relay/native token can be teleported to Relay. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), Parent.into(), )); // Disable reserve transfers benchmarks for penpal. diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 78cefad66372..84181b1da31d 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -470,16 +470,8 @@ pub type XcmRouter = WithUniqueTopic<( #[cfg(feature = "runtime-benchmarks")] parameter_types! { pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported to/from Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // We can reserve transfer some AH local token to/from AH. 
- pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(SystemAssetHubLocation::get()) }.into(), - SystemAssetHubLocation::get(), - )); + pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; + pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; } impl pallet_xcm::Config for Runtime { diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index da0aabfbaea5..24333aea9d91 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -218,7 +218,7 @@ parameter_types! { // We can reserve transfer native token to some random parachain. pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), - Parachain(4321).into(), + Parachain(43211234).into(), )); } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index fe61725bd6d8..9e90b5640fb9 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -124,13 +124,13 @@ parameter_types! { pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); // Relay/native token can be teleported to/from AH. pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), AssetHub::get(), )); // We can reserve transfer native token to some random parachain. pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into()) }.into(), - Parachain(4321).into(), + MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), + Parachain(43211234).into(), )); } From b648df2ba4b31e2bf45b2e1da2596de2ff6bee1a Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 16:07:37 +0300 Subject: [PATCH 15/69] AHs simplify test_cases_over_bridge --- .../test-utils/src/test_cases_over_bridge.rs | 87 +++++-------------- 1 file changed, 21 insertions(+), 66 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 9d8ca0e0042a..2cb304d22388 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -16,7 +16,7 @@ //! Module contains predefined test-case scenarios for `Runtime` with various assets transferred //! over a bridge. 
-use crate::assert_matches_reserve_asset_deposited_instructions; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ @@ -32,10 +32,7 @@ use parachains_runtimes_test_utils::{ use sp_runtime::{traits::StaticLookup, Saturating}; use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; -use xcm_executor::{ - traits::{ConvertLocation, TransactAsset}, - XcmExecutor, -}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; pub struct TestBridgingConfig { pub bridged_network: NetworkId, @@ -129,9 +126,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< &alice, ); - // drip ED to account + // We calculate the exact delivery fees _after_ sending the message, by weighing the sent + // XCM, and the delivery fee varies across runtimes, so just add a large enough buffer, + // then verify the arithmetic checks out on the final balance. + let delivery_fees_buffer = 40_000_000_000u128; + // drip ED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = - existential_deposit.saturating_mul(2.into()) + balance_to_transfer.into(); + existential_deposit + balance_to_transfer.into() + delivery_fees_buffer.into(); let _ = >::deposit_creating( &alice_account, alice_account_init_balance, @@ -184,56 +185,6 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< let expected_beneficiary = target_destination_account; - // Make sure sender has enough funds for paying delivery fees - let handling_delivery_fees = { - // Probable XCM with `ReserveAssetDeposited`. - let mut expected_reserve_asset_deposited_message = Xcm(vec![ - ReserveAssetDeposited(MultiAssets::from(expected_assets.clone())), - ClearOrigin, - BuyExecution { - fees: MultiAsset { - id: Concrete(Default::default()), - fun: Fungible(balance_to_transfer), - }, - weight_limit: Unlimited, - }, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, - SetTopic([ - 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, - 140, 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, - ]), - ]); - assert_matches_reserve_asset_deposited_instructions( - &mut expected_reserve_asset_deposited_message, - &expected_assets, - &expected_beneficiary, - ); - - // Call `SendXcm::validate` to get delivery fees. - let (_, delivery_fees): (_, MultiAssets) = XcmConfig::XcmSender::validate( - &mut Some(target_location_from_different_consensus), - &mut Some(expected_reserve_asset_deposited_message), - ) - .expect("validate passes"); - // Drip delivery fee to Alice account. - let mut delivery_fees_added = false; - for delivery_fee in delivery_fees.inner() { - assert_ok!(::deposit_asset( - &delivery_fee, - &MultiLocation { - parents: 0, - interior: X1(AccountId32 { - network: None, - id: alice_account.clone().into(), - }), - }, - None, - )); - delivery_fees_added = true; - } - delivery_fees_added - }; - // do pallet_xcm call reserve transfer assert_ok!(>::limited_reserve_transfer_assets( RuntimeHelper::::origin_of(alice_account.clone()), @@ -276,6 +227,7 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< // check sent XCM ExportMessage to BridgeHub + let mut delivery_fees = 0; // 1.
check paid or unpaid if let Some(expected_fee_asset_id) = maybe_paid_export_message { xcm_sent @@ -316,6 +268,10 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< .split_global() .expect("split works"); assert_eq!(destination, &target_location_junctions_without_global_consensus); + // Call `SendXcm::validate` to get delivery fees. + delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(target_location_from_different_consensus, inner_xcm.clone()); assert_matches_reserve_asset_deposited_instructions( inner_xcm, &expected_assets, @@ -331,8 +287,8 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< assert_eq!( >::free_balance(&alice_account), alice_account_init_balance - .saturating_sub(existential_deposit) .saturating_sub(balance_to_transfer.into()) + .saturating_sub(delivery_fees.into()) ); // check reserve account increased by balance_to_transfer @@ -342,14 +298,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< ); // check dedicated account increased by delivery fees (if configured) - if handling_delivery_fees { - if let Some(delivery_fees_account) = delivery_fees_account { - let delivery_fees_account_balance_after = - >::free_balance(&delivery_fees_account); - assert!( - delivery_fees_account_balance_after > delivery_fees_account_balance_before - ); - } + if let Some(delivery_fees_account) = delivery_fees_account { + let delivery_fees_account_balance_after = + >::free_balance(&delivery_fees_account); + assert!( + delivery_fees_account_balance_after - delivery_fees.into() >= + delivery_fees_account_balance_before + ); } }) } From 9643a3adf8c2d2bf2a8e496e06702013d547cd55 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 27 Oct 2023 15:58:55 +0200 Subject: [PATCH 16/69] Enable "cargo run --bin substrate-node" (#1927) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 66271139dfd4..2c63aabf9352 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -456,7 +456,7 @@ members = [ "substrate/utils/prometheus", "substrate/utils/wasm-builder", ] -default-members = [ "polkadot" ] +default-members = [ "polkadot", "substrate/bin/node/cli" ] [profile.release] # Polkadot runtime requires unwinding. 
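The next patch replaces the benchmark-only `ReachableDest`, `TeleportableAssets` and `ReserveTransferableAssets` parameter types with a dedicated `pallet_xcm::benchmarking::Config` trait that each runtime implements for itself. The authoritative definition lives in `polkadot/xcm/pallet-xcm/src/benchmarking.rs` and its full body is not visible in this series; inferred purely from the `impl` blocks added below, it presumably looks roughly like this sketch (the default `None` bodies are an assumption):

    use xcm::latest::prelude::*; // for MultiLocation, MultiAssets

    /// Sketch only: benchmark configuration trait as inferred from the runtime
    /// `impl` blocks in the following patch; defaults here are assumptions.
    pub trait Config: crate::Config {
        /// A destination that the runtime's `XcmRouter` can deliver to.
        fn reachable_dest() -> Option<MultiLocation> {
            None
        }

        /// Assets (and destination) for which teleports are trusted, if any.
        fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> {
            None
        }

        /// Assets (and destination) usable for a reserve transfer, if any.
        fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> {
            None
        }
    }

A runtime that does not support an operation simply returns `None` and the corresponding benchmark bails out, mirroring how the earlier `Option`-based parameter types map a missing value to `BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))`.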
From da7d8ff110fe9ee1aca3df47e438c42dc232165e Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 27 Oct 2023 18:04:18 +0300 Subject: [PATCH 17/69] runtimes: add dedicated benchmarking config for pallet-xcm --- .../runtime/src/xcm_config.rs | 15 ---- .../assets/asset-hub-kusama/src/lib.rs | 36 +++++++++- .../assets/asset-hub-kusama/src/xcm_config.rs | 21 ------ .../assets/asset-hub-polkadot/src/lib.rs | 36 +++++++++- .../asset-hub-polkadot/src/xcm_config.rs | 21 ------ .../assets/asset-hub-rococo/src/lib.rs | 36 +++++++++- .../assets/asset-hub-rococo/src/xcm_config.rs | 21 ------ .../assets/asset-hub-westend/src/lib.rs | 36 +++++++++- .../asset-hub-westend/src/xcm_config.rs | 21 ------ .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 26 ++++++- .../bridge-hub-kusama/src/xcm_config.rs | 18 ----- .../bridge-hub-polkadot/src/lib.rs | 26 ++++++- .../bridge-hub-polkadot/src/xcm_config.rs | 18 ----- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 26 ++++++- .../bridge-hub-rococo/src/xcm_config.rs | 18 ----- .../collectives-polkadot/src/lib.rs | 26 ++++++- .../collectives-polkadot/src/xcm_config.rs | 18 ----- .../contracts/contracts-rococo/src/lib.rs | 27 +++++++- .../contracts-rococo/src/xcm_config.rs | 18 ----- .../runtimes/testing/penpal/src/xcm_config.rs | 18 ----- .../testing/rococo-parachain/src/lib.rs | 13 ---- polkadot/runtime/rococo/src/lib.rs | 32 ++++++++- polkadot/runtime/rococo/src/xcm_config.rs | 21 ------ .../runtime/test-runtime/src/xcm_config.rs | 13 ---- polkadot/runtime/westend/src/lib.rs | 31 ++++++++- polkadot/runtime/westend/src/xcm_config.rs | 21 ------ polkadot/xcm/pallet-xcm/src/benchmarking.rs | 69 ++++++++++++++----- polkadot/xcm/pallet-xcm/src/lib.rs | 22 +----- polkadot/xcm/pallet-xcm/src/mock.rs | 40 +++++------ .../xcm/xcm-builder/src/tests/pay/mock.rs | 13 ---- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 13 ---- .../xcm-simulator/example/src/parachain.rs | 13 ---- .../xcm-simulator/example/src/relay_chain.rs | 13 ---- .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 13 ---- .../xcm-simulator/fuzzer/src/relay_chain.rs | 13 ---- 35 files changed, 397 insertions(+), 425 deletions(-) diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/cumulus/parachain-template/runtime/src/xcm_config.rs index 9f7de9895fae..752137c96f18 100644 --- a/cumulus/parachain-template/runtime/src/xcm_config.rs +++ b/cumulus/parachain-template/runtime/src/xcm_config.rs @@ -150,15 +150,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Teleports are disabled - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - // Reserve transfers are disabled. 
- pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -187,12 +178,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index bc17fcada23f..f83ed416f327 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -929,7 +929,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1167,6 +1167,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1210,6 +1211,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parachain(random_para_id).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{KsmLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index ea28ec91832a..176a6d023178 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -552,21 +552,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between AH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // We can reserve transfer native token to some random parachain. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(43211234).into(), - )); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -599,12 +584,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 7033e1c2dcac..d2f318ab11e7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -833,7 +833,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1047,6 +1047,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1089,6 +1090,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. 
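+ // An outbound HRMP channel to that parachain is opened below so the benchmarked + // XCM can actually be routed to it.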
+ let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parachain(random_para_id).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{DotLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs index 71dc71f477e1..3910e68bf206 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs @@ -476,21 +476,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between AH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // We can reserve transfer native token to some random parachain. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(43211234).into(), - )); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -523,12 +508,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 1ce504d6704f..44fa05b9924e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1008,7 +1008,7 @@ mod benches { [pallet_xcm_bridge_hub_router, ToWococo] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1246,6 +1246,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1298,6 +1299,39 @@ impl_runtime_apis! 
{ Config as XcmBridgeHubRouterConfig, }; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parachain(random_para_id).into(), + )) + } + } + impl XcmBridgeHubRouterConfig for Runtime { fn make_congested() { cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 441589aa8641..6aebaf9be6d2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -655,21 +655,6 @@ pub type XcmRouter = WithUniqueTopic<( ToRococoXcmRouter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between AH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // We can reserve transfer native token to some random parachain. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(43211234).into(), - )); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -709,12 +694,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 30d384222422..af7e41f81236 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -947,7 +947,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1231,6 +1231,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1274,6 +1275,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parachain(random_para_id).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{MaxAssetsIntoHolding, WestendLocation}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index dc4e4372816d..373d6dc3b588 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -585,21 +585,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between AH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // We can reserve transfer native token to some random parachain. 
- pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(43211234).into(), - )); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -628,12 +613,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index 9fdf8380bc3a..7ff1e1cdb356 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -456,7 +456,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -636,6 +636,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -671,6 +672,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::KsmRelayLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs index e65f346137bf..218da1a9bb61 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs @@ -241,18 +241,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between BH and Relay. 
- pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // Reserve transfers are disabled on BH. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -284,12 +272,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 6ce1df992957..39238689d458 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -456,7 +456,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -636,6 +636,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -671,6 +672,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::DotRelayLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs index 1a3d4a938a97..727990d85f49 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs @@ -245,18 +245,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between BH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // Reserve transfers are disabled on BH. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -288,12 +276,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 326acc812a27..77f6d8cdd1ea 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -535,7 +535,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -816,6 +816,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -860,6 +861,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Reserve transfers are disabled on BH. 
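+ // (returning `None` presumably short-circuits the benchmark with an overridden + // `Weight::MAX` result, as with the `Option`-based parameter types it replaces)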
+ None + } + } + use xcm::latest::prelude::*; use xcm_config::TokenLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 6646e5edf94f..01624c574344 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -314,18 +314,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between BH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // Reserve transfers are disabled on BH. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -356,12 +344,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index edfbfa851fe3..258ecd54f901 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -719,7 +719,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] [pallet_preimage, Preimage] [pallet_scheduler, Scheduler] [pallet_referenda, FellowshipReferenda] @@ -907,6 +907,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -936,6 +937,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Reserve transfers are disabled on Collectives. 
+ None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs index 1f673a1ee29b..cab853741ded 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs @@ -293,18 +293,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported between BH and Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // Reserve transfers are disabled on Collectives. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - /// Type to convert the Fellows origin to a Plurality `MultiLocation` value. pub type FellowsToPlurality = OriginToPluralityVoice; @@ -335,12 +323,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 71733d48e815..4353d209fe21 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -405,7 +405,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] ); } @@ -650,6 +650,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -679,6 +680,30 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use xcm::latest::prelude::*; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported between Contracts-System-Para and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Reserve transfers are disabled on Contracts-System-Para. 
+ None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index fc55fc6a4cb4..ef52ac002412 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -223,18 +223,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported to Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // Reserve transfers are disabled on Contracts. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -264,12 +252,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index a17f8e5b99ac..542d07fbed95 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -312,18 +312,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - // Relay/native token can be teleported to Relay. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parent.into(), - )); - // Disable reserve transfers benchmarks for penpal. 
- pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -352,12 +340,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 84181b1da31d..01fcd4d3c1d5 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -467,13 +467,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -498,12 +491,6 @@ impl pallet_xcm::Config for Runtime { type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index e6ad061ce069..6171576d30c0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1572,7 +1572,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -2048,6 +2048,8 @@ sp_api::impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2065,6 +2067,7 @@ sp_api::impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ @@ -2081,6 +2084,33 @@ sp_api::impl_runtime_apis! 
{ impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }.into(), + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }.into(), + Parachain(43211234).into(), + )) + } + } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 24333aea9d91..54828e0b8a60 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -207,21 +207,6 @@ parameter_types! { pub const FellowsBodyId: BodyId = BodyId::Technical; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); - // Relay/native token can be teleported to/from AH. - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), - AssetHub::get(), - )); - // We can reserve transfer native token to some random parachain. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), - Parachain(43211234).into(), - )); -} - /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. pub type LocalOriginToLocation = ( @@ -276,10 +261,4 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index aa9e29795fcb..ae4faecf7001 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -127,13 +127,6 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(xcm::latest::Junctions::Here.into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for crate::Runtime { // The config types here are entirely configurable, since the only one that is sorely needed // is `XcmExecutor`, which will be used in unit tests located in xcm-executor. 
@@ -160,10 +153,4 @@ impl pallet_xcm::Config for crate::Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 9ee4f3cf23e5..79145855d71a 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1616,7 +1616,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -2131,6 +2131,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2158,12 +2159,40 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }.into(), + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }.into(), + crate::Junction::Parachain(43211234).into(), + )) + } + } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} impl runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 9e90b5640fb9..470f1252b191 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -119,21 +119,6 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); - // Relay/native token can be teleported to/from AH. 
- pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), - AssetHub::get(), - )); - // We can reserve transfer native token to some random parachain. - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { fun: Fungible(crate::EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }.into(), - Parachain(43211234).into(), - )); -} - pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, @@ -273,10 +258,4 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index b0a37db6a783..ab2941b10de3 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -25,7 +25,40 @@ use xcm::{latest::prelude::*, v2}; type RuntimeOrigin = ::RuntimeOrigin; // existential deposit multiplier -const ED_MULTIPLIER: u32 = 10; +const ED_MULTIPLIER: u32 = 100; + +/// Pallet we're benchmarking here. +pub struct Pallet(crate::Pallet); + +/// Trait that must be implemented by runtime to be able to benchmark pallet properly. +pub trait Config: crate::Config { + /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on a reachable destination will be skipped. + fn reachable_dest() -> Option { + None + } + + /// A `(MultiAssets, MultiLocation)` pair representing assets and the destination they can + /// be teleported to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + None + } + + /// A `(MultiAssets, MultiLocation)` pair representing assets and the destination they can + /// be reserve-transferred to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + None + } +} benchmarks! { where_clause { @@ -40,7 +73,7 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_dest: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); @@ -48,7 +81,7 @@ benchmarks! { }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { - let (assets, destination) = T::TeleportableAssets::get().ok_or( + let (assets, destination) = T::teleportable_assets_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; @@ -92,7 +125,7 @@ benchmarks! 
{ } reserve_transfer_assets { - let (assets, destination) = T::ReserveTransferableAssets::get().ok_or( + let (assets, destination) = T::reserve_transferable_assets_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; @@ -148,7 +181,7 @@ benchmarks! { }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) force_xcm_version { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let xcm_version = 2; @@ -157,18 +190,18 @@ benchmarks! { force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) force_subscribe_version_notify { - let versioned_loc: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_loc: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_unsubscribe_version_notify { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let versioned_loc: VersionedMultiLocation = loc.into(); - let _ = Pallet::::request_version_notify(loc); + let _ = crate::Pallet::::request_version_notify(loc); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_suspension {}: _(RawOrigin::Root, true) @@ -178,7 +211,7 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); } migrate_version_notifiers { @@ -186,22 +219,22 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); } already_notified_target { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), )?; let loc = VersionedMultiLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_current_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); @@ -209,7 +242,7 @@ benchmarks! { let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_target_migration_fail { @@ -223,7 +256,7 @@ benchmarks! 
{ let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_version_notify_targets { @@ -232,18 +265,18 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_and_notify_old_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 8d9bc7373c6c..186dd2d51197 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] @@ -261,26 +261,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - - /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on a reachable destination will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest: Get>; - - /// A `(MultiAssets, MultiLocation)` pair representing assets and the destination they can - /// be teleported to. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on `TeleportableAssets` will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets: Get>; - - /// A `(MultiAssets, MultiLocation)` pair representing assets and the destination they can - /// be reserve-transferred to. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on `ReserveTransferableAssets` will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets: Get>; } #[pallet::event] diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index e421d46f206b..b88734b9abfb 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -498,22 +498,6 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1000).into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - NativeAsset::get().into(), - SystemParachainLocation::get(), - )); - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = Some(( - MultiAsset { - fun: Fungible(10), - id: Concrete(Here.into_location()), - }.into(), - Parachain(OTHER_PARA_ID).into(), - )); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -538,12 +522,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl origin::Config for Test {} @@ -554,6 +532,24 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +impl super::benchmarking::Config for Test { + fn reachable_dest() -> Option { + Some(Parachain(1000).into()) + } + + fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + Some((NativeAsset::get().into(), SystemParachainLocation::get())) + } + + fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + Some(( + MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }.into(), + Parachain(OTHER_PARA_ID).into(), + )) + } +} + pub(crate) fn last_event() -> RuntimeEvent { System::events().pop().expect("RuntimeEvent expected").event } diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index b4c2d9ae3dc7..78b9284c689f 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -246,13 +246,6 @@ type SovereignAccountOf = ( HashedDescription>, ); -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1000).into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -277,12 +270,6 @@ impl pallet_xcm::Config for Test { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } pub const UNITS: Balance = 1_000_000_000_000; diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index c9c57d8bf7ad..4f183c7a15b6 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -210,13 +210,6 @@ impl xcm_executor::Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Here.into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; @@ -242,12 +235,6 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } impl origin::Config for Runtime {} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index 0e1e3fd8f89f..9f0411970ce7 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -399,13 +399,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - pub struct TrustedLockerCase(PhantomData); impl> ContainsPair for TrustedLockerCase @@ -446,12 +439,6 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } type Block = frame_system::mocking::MockBlock; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index f68e94df1fa0..bdd7ff6d3eaf 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -199,13 +199,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1).into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -231,12 +224,6 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } parameter_types! 
{ diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index f47476dcb615..41234837aca0 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -313,13 +313,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -344,12 +337,6 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } type Block = frame_system::mocking::MockBlock; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 2a843acf62c1..c9a57db970a7 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -163,13 +163,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1).into()); - pub TeleportableAssets: Option<(MultiAssets, MultiLocation)> = None; - pub ReserveTransferableAssets: Option<(MultiAssets, MultiLocation)> = None; -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -195,12 +188,6 @@ impl pallet_xcm::Config for Runtime { type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; type AdminOrigin = EnsureRoot; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - #[cfg(feature = "runtime-benchmarks")] - type TeleportableAssets = TeleportableAssets; - #[cfg(feature = "runtime-benchmarks")] - type ReserveTransferableAssets = ReserveTransferableAssets; } parameter_types! { From bea8baed691496816ac50794855cc0e9a5b5cd79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 27 Oct 2023 20:35:59 +0200 Subject: [PATCH 18/69] level-monitor: Fix issue with warp syncing (#2053) When warp syncing a node we import a header of the parachain around the tip of the chain. This header is currently not imported as a finalized block (this should be fixed at some point as well) and its parent headers are not yet present (they are still being synced); thus, we run into a panic. Even if there is a case where a leaf could not be found in the database, this most likely means that the db is broken and it will fail somewhere else anyway.
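For context, the shape of the fix in the diff below is a "degrade gracefully" pattern: the panicking `expect` calls are replaced with `let … else` / `match` fallbacks that log and skip. A minimal self-contained sketch of that pattern, using stand-in types instead of the real `sc-client-api` backend (so `Meta`, `header_metadata` and the `u64` hashes here are illustrative only):

```rust
use std::collections::HashMap;

// Stand-in metadata record; the real code walks header metadata
// returned by the blockchain backend.
struct Meta {
    number: u32,
    parent: u64,
}

// Stand-in for `backend.blockchain().header_metadata(hash)`, which returns a
// `Result` precisely because an entry can be absent while syncing.
fn header_metadata(db: &HashMap<u64, Meta>, hash: u64) -> Result<&Meta, ()> {
    db.get(&hash).ok_or(())
}

fn restore(db: &HashMap<u64, Meta>, leaves: &[u64], lowest_level: u32) {
    for &leaf in leaves {
        // Previously: `.expect("route from finalized to leaf should be
        // available; qed")`, which panicked while a warp sync was still
        // filling in ancestors.
        let Ok(mut meta) = header_metadata(db, leaf) else {
            eprintln!("Could not fetch header metadata for leaf: {leaf:?}");
            continue
        };
        loop {
            if meta.number <= lowest_level {
                break
            }
            // A missing parent now stops the walk instead of panicking.
            match header_metadata(db, meta.parent) {
                Ok(m) => meta = m,
                Err(_) => break,
            }
        }
    }
}

fn main() {
    // An empty db approximates a freshly warp-synced node: leaves whose
    // metadata is unavailable are simply skipped, with no panic.
    restore(&HashMap::new(), &[1, 2], 0);
}
```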
--- .../consensus/common/src/level_monitor.rs | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/cumulus/client/consensus/common/src/level_monitor.rs b/cumulus/client/consensus/common/src/level_monitor.rs index 5f115ec2c4a3..270e3f57ae5a 100644 --- a/cumulus/client/consensus/common/src/level_monitor.rs +++ b/cumulus/client/consensus/common/src/level_monitor.rs @@ -98,7 +98,6 @@ where /// /// Level limits are not enforced during this phase. fn restore(&mut self) { - const ERR_MSG: &str = "route from finalized to leaf should be available; qed"; let info = self.backend.blockchain().info(); log::debug!( @@ -112,7 +111,14 @@ where self.import_counter = info.finalized_number; for leaf in self.backend.blockchain().leaves().unwrap_or_default() { - let mut meta = self.backend.blockchain().header_metadata(leaf).expect(ERR_MSG); + let Ok(mut meta) = self.backend.blockchain().header_metadata(leaf) else { + log::debug!( + target: LOG_TARGET, + "Could not fetch header metadata for leaf: {leaf:?}", + ); + + continue + }; self.import_counter = self.import_counter.max(meta.number); @@ -123,7 +129,19 @@ if meta.number <= self.lowest_level { break } - meta = self.backend.blockchain().header_metadata(meta.parent).expect(ERR_MSG); + + meta = match self.backend.blockchain().header_metadata(meta.parent) { + Ok(m) => m, + Err(_) => { + // This can happen after we have warp synced a node. + log::debug!( + target: LOG_TARGET, + "Could not fetch header metadata for parent: {:?}", + meta.parent, + ); + break + }, + } } } From f46f5a90f688b213faa502780a45a1598f32703b Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Fri, 27 Oct 2023 15:43:51 -0400 Subject: [PATCH 19/69] upgrade to docify 0.2.6 (#2069) Updates `docify` to 0.2.6, which fixes a bug that prevented `#[docify::export]` from working properly when nested within sub-items of items that already have `#[docify::export]` attached to them.
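For illustration, the kind of nesting that 0.2.6 un-breaks looks roughly like this (the module and function names are made up; only the `#[docify::export]` attribute and the `docify::embed!` macro mentioned in the comment are real docify items):

```rust
// With docify < 0.2.6, an inner `#[docify::export]` on a sub-item of an
// already-exported item was not picked up correctly.
#[docify::export]
mod outer {
    // Now also exported on its own, and embeddable elsewhere via
    // `#[doc = docify::embed!("src/lib.rs", inner)]`.
    #[docify::export]
    fn inner() {
        // ...
    }
}
```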
Release notes here: https://github.com/sam0x17/docify/releases/tag/v0.2.6 cc @ggwpez @kianenigma --- Cargo.lock | 8 ++++---- substrate/frame/bags-list/Cargo.toml | 2 +- substrate/frame/fast-unstake/Cargo.toml | 2 +- substrate/frame/paged-list/Cargo.toml | 2 +- substrate/frame/scheduler/Cargo.toml | 2 +- substrate/frame/sudo/Cargo.toml | 2 +- substrate/frame/support/Cargo.toml | 2 +- substrate/frame/timestamp/Cargo.toml | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c65b1392535..60790128506f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4539,18 +4539,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80bf86c286159ed2d70e9ff5c4de69b793ab8632c8a1d276d44bbff36f052f64" +checksum = "4235e9b248e2ba4b92007fe9c646f3adf0ffde16dc74713eacc92b8bc58d8d2f" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b5ac3bdcdc56f2317e51884a90bd6f595febd6d029cdb75174162107072a8a3" +checksum = "47020e12d7c7505670d1363dd53d6c23724f71a90a3ae32ff8eba40de8404626" dependencies = [ "common-path", "derive-syn-parse", diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index f6b8335b311d..05b86f6c7239 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau # third party log = { version = "0.4.17", default-features = false } -docify = "0.2.5" +docify = "0.2.6" aquamarine = { version = "0.3.2" } # Optional imports for benchmarking diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index ad502f03d187..2aa2e918f3e3 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -27,7 +27,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -docify = "0.2.5" +docify = "0.2.6" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 194201b715c0..4bc3dd6a3c7a 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } -docify = "0.2.5" +docify = "0.2.6" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 6f8f59738853..6aa81baf7ac6 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -20,7 +20,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} sp-weights = { path = "../../primitives/weights", default-features = false} -docify = "0.2.5" 
+docify = "0.2.6" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 5663dc0dea8c..ef507a953164 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -22,7 +22,7 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} -docify = "0.2.5" +docify = "0.2.6" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index e6edaf22f108..82942c3a4f15 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -43,7 +43,7 @@ k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features=false} serde_json = { version = "1.0.107", default-features = false, features = ["alloc"] } -docify = "0.2.5" +docify = "0.2.6" static_assertions = "1.1.0" aquamarine = { version = "0.3.2" } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index a4d0ec087622..e23ded725d89 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -27,7 +27,7 @@ sp-std = { path = "../../primitives/std", default-features = false} sp-storage = { path = "../../primitives/storage", default-features = false} sp-timestamp = { path = "../../primitives/timestamp", default-features = false} -docify = "0.2.5" +docify = "0.2.6" [dev-dependencies] sp-core = { path = "../../primitives/core" } From a70617124b4c0c730016a210e2b8b343c829d784 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Sat, 28 Oct 2023 10:23:19 +1100 Subject: [PATCH 20/69] Automatically build and attach production and dev runtimes to GH releases (#2054) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/paritytech/release-engineering/issues/6 Adds a new Github Workflow which on a new release being created, builds and attaches all runtimes managed in this repository in two flavours: - `dev-debug-build`: Built with the `try-runtime` feature and has logging enabled - `on-chain-release`: Built with the regular old `on-chain-release` feature The new Github Workflow could be extended in the future by the @paritytech/release-engineering team to fully automate the release process if they choose to, similar to how it is fully automated in the Fellowship repo (https://github.com/polkadot-fellows/runtimes/blob/main/.github/workflows/release.yml). The `on-chain-release` did not exist for parachains, so I added it. 
--- Tested on my fork: - https://github.com/liamaharon/polkadot-sdk/actions/runs/6663773523 - https://github.com/liamaharon/polkadot-sdk/releases/tag/test-6 --------- Co-authored-by: Chevdor Co-authored-by: Dónal Murray --- .../build-and-attach-release-runtimes.yml | 69 +++++++++++++++++++ .../assets/asset-hub-rococo/Cargo.toml | 5 ++ .../assets/asset-hub-westend/Cargo.toml | 5 ++ .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 5 ++ .../contracts/contracts-rococo/Cargo.toml | 5 ++ .../glutton/glutton-kusama/Cargo.toml | 5 ++ polkadot/runtime/rococo/Cargo.toml | 2 +- polkadot/runtime/westend/Cargo.toml | 2 +- 8 files changed, 96 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/build-and-attach-release-runtimes.yml diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml new file mode 100644 index 000000000000..297f7a1665b2 --- /dev/null +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -0,0 +1,69 @@ +name: Build and Attach Runtimes to Releases/RC + +on: + release: + types: + - created + +env: + PROFILE: production + +jobs: + build_and_upload: + strategy: + matrix: + runtime: + - { name: westend, package: westend-runtime, path: polkadot/runtime/westend } + - { name: rococo, package: rococo-runtime, path: polkadot/runtime/rococo } + - { name: asset-hub-rococo, package: asset-hub-rococo-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-rococo } + - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } + - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } + - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } + build_config: + # Release build has logging disabled and no dev features + - { type: on-chain-release, opts: --features on-chain-release-build } + # Debug build has logging enabled and developer features + - { type: dev-debug-build, opts: --features try-runtime } + + runs-on: ubuntu-22.04 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }} + id: srtool_build + uses: chevdor/srtool-actions@v0.8.0 + env: + BUILD_OPTS: ${{ matrix.build_config.opts }} + with: + chain: ${{ matrix.runtime.name }} + package: ${{ matrix.runtime.package }} + runtime_dir: ${{ matrix.runtime.path }} + profile: ${{ env.PROFILE }} + + - name: Build Summary + run: | + echo "${{ steps.srtool_build.outputs.json }}" | jq . 
> ${{ matrix.runtime.name }}-srtool-digest.json + cat ${{ matrix.runtime.name }}-srtool-digest.json + echo "Runtime location: ${{ steps.srtool_build.outputs.wasm }}" + + - name: Set up paths and runtime names + id: setup + run: | + RUNTIME_BLOB_NAME=$(echo ${{ matrix.runtime.package }} | sed 's/-/_/g').compact.compressed.wasm + PREFIX=${{ matrix.build_config.type == 'dev-debug-build' && 'DEV_DEBUG_BUILD__' || '' }} + + echo "RUNTIME_BLOB_NAME=$RUNTIME_BLOB_NAME" >> $GITHUB_ENV + echo "ASSET_PATH=./${{ matrix.runtime.path }}/target/srtool/${{ env.PROFILE }}/wbuild/${{ matrix.runtime.package }}/$RUNTIME_BLOB_NAME" >> $GITHUB_ENV + echo "ASSET_NAME=$PREFIX$RUNTIME_BLOB_NAME" >> $GITHUB_ENV + + - name: Upload Runtime to Release + uses: actions/upload-release-asset@v1 + with: + upload_url: ${{ github.event.release.upload_url }} + asset_path: ${{ env.ASSET_PATH }} + asset_name: ${{ env.ASSET_NAME }} + asset_content_type: application/octet-stream + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index ea5b5fa05540..ebf811e54635 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -240,3 +240,8 @@ std = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 5e3807f27858..7c7a07314596 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -217,3 +217,8 @@ std = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 5befb21c8911..8c4e1612780f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -233,3 +233,8 @@ try-runtime = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index a020b66baae6..0eb2428f358a 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -192,3 +192,8 @@ try-runtime = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. 
This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index e8abc61311c2..f5362e4d6b24 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -125,3 +125,8 @@ try-runtime = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 6d0dee3e4343..243f4337cae1 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -314,7 +314,7 @@ fast-runtime = [] runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] -# A feature that should be enabled when the runtime should be build for on-chain +# A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index a4f1bfb007ec..cb1118cf92fb 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -338,7 +338,7 @@ fast-runtime = [] runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] -# A feature that should be enabled when the runtime should be build for on-chain +# A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. on-chain-release-build = [ "sp-api/disable-logging" ] From 8d85faa3e65c2eb29533d43ae8795e4c991df7b7 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Sat, 28 Oct 2023 11:49:51 +0300 Subject: [PATCH 21/69] AHs benchmarks: fix transfer to sibling parachain --- cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs | 2 +- .../parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs | 2 +- cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index f83ed416f327..25d5712f4dfb 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -1239,7 +1239,7 @@ impl_runtime_apis! 
{ fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(random_para_id).into(), + ParentThen(Parachain(random_para_id).into()).into(), )) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index d2f318ab11e7..dd111355c02b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -1118,7 +1118,7 @@ impl_runtime_apis! { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(random_para_id).into(), + ParentThen(Parachain(random_para_id).into()).into(), )) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 44fa05b9924e..1f54c3ca997b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1327,7 +1327,7 @@ impl_runtime_apis! { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(random_para_id).into(), + ParentThen(Parachain(random_para_id).into()).into(), )) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index af7e41f81236..cc04db22785b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1303,7 +1303,7 @@ impl_runtime_apis! { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) }.into(), - Parachain(random_para_id).into(), + ParentThen(Parachain(random_para_id).into()).into(), )) } } From 8ce16ee6ff190912995dc6c8da85b741eaada395 Mon Sep 17 00:00:00 2001 From: Vadim Smirnov Date: Sun, 29 Oct 2023 20:20:40 +0800 Subject: [PATCH 22/69] fix(frame-benchmarking-cli): Pass heap_pages param to WasmExecutor (#2075) In https://github.com/paritytech/substrate/pull/13740 the use of the `heap-pages` param inside the `frame-benchmarking-cli` has been removed. This results in the benchmarks running out of memory; this PR fixes the heap allocation strategy of the benchmarking wasm executor by passing the `heap-pages` param through again.
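To show what the fix computes before diving into the diff: a self-contained sketch of the new `heap-pages` → allocation-strategy mapping. `HeapAllocStrategy` and `DEFAULT_HEAP_ALLOC_STRATEGY` are stand-ins for the items imported from `sc-executor` in the patch (the default's exact value used here is an assumption for the example):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum HeapAllocStrategy {
    // A fixed number of extra wasm pages on top of the runtime's own.
    Static { extra_pages: u32 },
}

// Stand-in for `sc_executor::DEFAULT_HEAP_ALLOC_STRATEGY`.
const DEFAULT_HEAP_ALLOC_STRATEGY: HeapAllocStrategy =
    HeapAllocStrategy::Static { extra_pages: 2048 };

// Mirrors the patch: fall back to the executor default unless the user passed
// `--heap-pages`, in which case pin a static allocation of that many pages.
fn strategy_from_cli(heap_pages: Option<u64>) -> HeapAllocStrategy {
    heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static {
        extra_pages: p as _,
    })
}

fn main() {
    assert_eq!(strategy_from_cli(None), DEFAULT_HEAP_ALLOC_STRATEGY);
    assert_eq!(
        strategy_from_cli(Some(64)),
        HeapAllocStrategy::Static { extra_pages: 64 }
    );
    // In the patch the resulting strategy is then applied to both the
    // on-chain and off-chain configuration via the `WasmExecutor` builder.
}
```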
--- .../utils/frame/benchmarking-cli/src/pallet/command.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index 99f77866f8d0..5c76ca68e85f 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -25,7 +25,7 @@ use frame_support::traits::StorageInfo; use linked_hash_map::LinkedHashMap; use sc_cli::{execution_method_from_cli, CliConfiguration, Result, SharedParams}; use sc_client_db::BenchmarkingState; -use sc_executor::WasmExecutor; +use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_service::Configuration; use serde::Serialize; use sp_core::{ @@ -219,12 +219,20 @@ impl PalletCmd { let method = execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy); + let heap_pages = + self.heap_pages + .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { + extra_pages: p as _, + }); + let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions, ExtraHostFunctions, )>::builder() .with_execution_method(method) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) .with_max_runtime_instances(2) .with_runtime_cache_size(2) .build(); From 7035034710ecb9c6a786284e5f771364c520598d Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sun, 29 Oct 2023 18:25:33 +0100 Subject: [PATCH 23/69] Improve Client CLI help readability (#2073) Currently the CLI `-h/--help` command output is almost unreadable as (for some commands) it: - doesn't provide a short brief of what the command does. - doesn't separate the options description into smaller paragraphs. - doesn't use a smart wrap strategy for lines longer than the number of columns in the terminal. Below are some screenshots taken with a 100-column-wide terminal. ## Short help (./node -h) ### Before ![20231028-174531-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/11b62c3c-dcd5-43f4-ac58-f1b299e3f4b9) ### After ![20231028-175041-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/dc08f6fd-b287-40fb-8b33-71a185922104) ## Long help (./node --help) ### Before ![20231028-175257-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/9ebdc0ae-54ee-4760-b873-a7e813523cb6) ### After ![20231028-175155-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/69cbe5cb-eb2f-46a5-8ebf-76c0cf8c4bad) --------- Co-authored-by: command-bot <> --- Cargo.lock | 11 +++ polkadot/cli/src/cli.rs | 33 +++++---- substrate/client/cli/Cargo.toml | 2 +- substrate/client/cli/src/commands/run_cmd.rs | 69 ++++++++++++------- .../client/cli/src/params/import_params.rs | 19 +++-- .../client/cli/src/params/keystore_params.rs | 5 +- .../client/cli/src/params/network_params.rs | 47 +++++++++---- .../client/cli/src/params/node_key_params.rs | 42 ++++++----- .../cli/src/params/offchain_worker_params.rs | 8 +-- .../cli/src/params/prometheus_params.rs | 2 + .../client/cli/src/params/pruning_params.rs | 2 + .../client/cli/src/params/runtime_params.rs | 4 +- .../client/cli/src/params/shared_params.rs | 27 ++++++-- .../client/cli/src/params/telemetry_params.rs | 3 + .../cli/src/params/transaction_pool_params.rs | 4 +- substrate/client/storage-monitor/src/lib.rs | 9 ++- 16 files changed, 190 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60790128506f..1183f9204a2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2572,6 +2572,7 @@ dependencies = [ "anstyle", "clap_lex 0.5.1", "strsim", + "terminal_size", ] [[package]] name = "terminal_size" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ "rustix 0.38.8", "windows-sys 0.48.0", ] [[package]] name = "termtree" version = "0.4.1" diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index bc060c21fba7..e20e35c91038 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -84,29 +84,29 @@ pub struct RunCmd { /// Setup a GRANDPA scheduled voting pause. /// - /// This parameter takes two values, namely a block number and a delay (in - /// blocks). After the given block number is finalized the GRANDPA voter - /// will temporarily stop voting for new blocks until the given delay has - /// elapsed (i.e. until a block at height `pause_block + delay` is imported). + /// This parameter takes two values, namely a block number and a delay (in blocks). + /// + /// After the given block number is finalized the GRANDPA voter will temporarily + /// stop voting for new blocks until the given delay has elapsed (i.e. until a + /// block at height `pause_block + delay` is imported). #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, - /// Disable the BEEFY gadget - /// (currently enabled by default on Rococo, Wococo and Versi). + /// Disable the BEEFY gadget. + /// + /// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'. #[arg(long)] pub no_beefy: bool, - /// Add the destination address to the jaeger agent. + /// Add the destination address to the 'Jaeger' agent. /// - /// Must be valid socket address, of format `IP:Port` - /// commonly `127.0.0.1:6831`.
Below are some screenshots taken in a 100-column-wide terminal. ## Short help (./node -h) ### Before ![20231028-174531-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/11b62c3c-dcd5-43f4-ac58-f1b299e3f4b9) ### After ![20231028-175041-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/dc08f6fd-b287-40fb-8b33-71a185922104) ## Long help (./node --help) ### Before ![20231028-175257-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/9ebdc0ae-54ee-4760-b873-a7e813523cb6) ### After ![20231028-175155-grim](https://github.com/paritytech/polkadot-sdk/assets/8143589/69cbe5cb-eb2f-46a5-8ebf-76c0cf8c4bad) --------- Co-authored-by: command-bot <> --- Cargo.lock | 11 +++ polkadot/cli/src/cli.rs | 33 +++++---- substrate/client/cli/Cargo.toml | 2 +- substrate/client/cli/src/commands/run_cmd.rs | 69 ++++++++++++------- .../client/cli/src/params/import_params.rs | 19 +++-- .../client/cli/src/params/keystore_params.rs | 5 +- .../client/cli/src/params/network_params.rs | 47 +++++++++---- .../client/cli/src/params/node_key_params.rs | 42 ++++++----- .../cli/src/params/offchain_worker_params.rs | 8 +-- .../cli/src/params/prometheus_params.rs | 2 + .../client/cli/src/params/pruning_params.rs | 2 + .../client/cli/src/params/runtime_params.rs | 4 +- .../client/cli/src/params/shared_params.rs | 27 ++++++-- .../client/cli/src/params/telemetry_params.rs | 3 + .../cli/src/params/transaction_pool_params.rs | 4 +- substrate/client/storage-monitor/src/lib.rs | 9 ++- 16 files changed, 190 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60790128506f..1183f9204a2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2572,6 +2572,7 @@ dependencies = [ "anstyle", "clap_lex 0.5.1", "strsim", + "terminal_size", ] [[package]] @@ -18425,6 +18426,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix 0.38.8", + "windows-sys 0.48.0", +] + [[package]] name = "termtree" version = "0.4.1" diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index bc060c21fba7..e20e35c91038 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -84,29 +84,29 @@ pub struct RunCmd { /// Setup a GRANDPA scheduled voting pause. /// - /// This parameter takes two values, namely a block number and a delay (in - /// blocks). After the given block number is finalized the GRANDPA voter - /// will temporarily stop voting for new blocks until the given delay has - /// elapsed (i.e. until a block at height `pause_block + delay` is imported). + /// This parameter takes two values, namely a block number and a delay (in blocks). + /// + /// After the given block number is finalized the GRANDPA voter will temporarily + /// stop voting for new blocks until the given delay has elapsed (i.e. until a + /// block at height `pause_block + delay` is imported). #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, - /// Disable the BEEFY gadget - /// (currently enabled by default on Rococo, Wococo and Versi). + /// Disable the BEEFY gadget. + /// + /// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'. #[arg(long)] pub no_beefy: bool, - /// Add the destination address to the jaeger agent. + /// Add the destination address to the 'Jaeger' agent. /// - /// Must be valid socket address, of format `IP:Port` - /// commonly `127.0.0.1:6831`.
+ /// Must be valid socket address, of format `IP:Port` (commonly `127.0.0.1:6831`). #[arg(long)] pub jaeger_agent: Option, /// Add the destination address to the `pyroscope` agent. /// - /// Must be valid socket address, of format `IP:Port` - /// commonly `127.0.0.1:4040`. + /// Must be valid socket address, of format `IP:Port` (commonly `127.0.0.1:4040`). #[arg(long)] pub pyroscope_server: Option, @@ -126,10 +126,13 @@ pub struct RunCmd { #[arg(long)] pub overseer_channel_capacity_override: Option, - /// Path to the directory where auxiliary worker binaries reside. If not specified, the main - /// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: - /// if the path points to an executable rather then directory, that executable is used both as - /// preparation and execution worker. + /// Path to the directory where auxiliary worker binaries reside. + /// + /// If not specified, the main binary's directory is searched first, then + /// `/usr/lib/polkadot` is searched. + /// + /// TESTING ONLY: if the path points to an executable rather then directory, + /// that executable is used both as preparation and execution worker. #[arg(long, value_name = "PATH")] pub workers_path: Option, diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 98928700328f..dc53ed54d967 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4.27" -clap = { version = "4.4.6", features = ["derive", "string"] } +clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } fdlimit = "0.2.1" futures = "0.3.21" libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs index 5dda488b1333..bc62dc3324e3 100644 --- a/substrate/client/cli/src/commands/run_cmd.rs +++ b/substrate/client/cli/src/commands/run_cmd.rs @@ -40,35 +40,38 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; #[derive(Debug, Clone, Parser)] pub struct RunCmd { /// Enable validator mode. + /// /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). #[arg(long)] pub validator: bool, - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA + /// Disable GRANDPA. + /// + /// Disables voter when running in validator mode, otherwise disable the GRANDPA /// observer. #[arg(long)] pub no_grandpa: bool, - /// Listen to all RPC interfaces. - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC - /// proxy server to filter out dangerous methods. More details: + /// Listen to all RPC interfaces (default: local). + /// + /// Not all RPC methods are safe to be exposed publicly. + /// + /// Use an RPC proxy server to filter out dangerous methods. More details: /// . + /// /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[arg(long)] pub rpc_external: bool, /// Listen to all RPC interfaces. + /// /// Same as `--rpc-external`. #[arg(long)] pub unsafe_rpc_external: bool, /// RPC methods to expose. - /// - `unsafe`: Exposes every RPC method. - /// - `safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. - /// - `auto`: Acts as `safe` if RPC is served externally, e.g. 
when `--rpc--external` is - /// passed, otherwise acts as `unsafe`. #[arg( long, value_name = "METHOD SET", @@ -79,15 +82,15 @@ pub struct RunCmd { )] pub rpc_methods: RpcMethods, - /// Set the the maximum RPC request payload size for both HTTP and WS in megabytes. + /// Set the maximum RPC request payload size for both HTTP and WS in megabytes. #[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB)] pub rpc_max_request_size: u32, - /// Set the the maximum RPC response payload size for both HTTP and WS in megabytes. + /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. #[arg(long, default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)] pub rpc_max_response_size: u32, - /// Set the the maximum concurrent subscriptions per connection. + /// Set the maximum concurrent subscriptions per connection. #[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN)] pub rpc_max_subscriptions_per_connection: u32, @@ -99,15 +102,17 @@ pub struct RunCmd { #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS)] pub rpc_max_connections: u32, - /// Specify browser Origins allowed to access the HTTP & WS RPC servers. - /// A comma-separated list of origins (protocol://domain or special `null` + /// Specify browser *origins* allowed to access the HTTP and WS RPC servers. + /// + /// A comma-separated list of origins (`protocol://domain` or special `null` /// value). Value of `all` will disable origin validation. Default is to /// allow localhost and origins. When running in - /// --dev mode the default is to allow all origins. + /// `--dev` mode the default is to allow all origins. #[arg(long, value_name = "ORIGINS", value_parser = parse_cors)] pub rpc_cors: Option, /// The human-readable name for this node. + /// /// It's used as network node name. #[arg(long, value_name = "NAME")] pub name: Option, @@ -148,36 +153,51 @@ pub struct RunCmd { #[clap(flatten)] pub keystore_params: KeystoreParams, - /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. + /// Shortcut for `--name Alice --validator`. + /// + /// Session keys for `Alice` are added to keystore. #[arg(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub alice: bool, - /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. + /// Shortcut for `--name Bob --validator`. + /// + /// Session keys for `Bob` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to - /// keystore. + /// Shortcut for `--name Charlie --validator`. + /// + /// Session keys for `Charlie` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, - /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. + /// Shortcut for `--name Dave --validator`. + /// + /// Session keys for `Dave` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] pub dave: bool, - /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. + /// Shortcut for `--name Eve --validator`. + /// + /// Session keys for `Eve` are added to keystore. 
#[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] pub eve: bool, - /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. + /// Shortcut for `--name Ferdie --validator`. + /// + /// Session keys for `Ferdie` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] pub ferdie: bool, - /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. + /// Shortcut for `--name One --validator`. + /// + /// Session keys for `One` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] pub one: bool, - /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. + /// Shortcut for `--name Two --validator`. + /// + /// Session keys for `Two` are added to keystore. #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] pub two: bool, @@ -186,10 +206,13 @@ pub struct RunCmd { pub force_authoring: bool, /// Run a temporary node. + /// /// A temporary directory will be created to store the configuration and will be deleted /// at the end of the process. + /// /// Note: the directory is random per process execution. This directory is used as base path /// which includes: database, node key and keystore. + /// /// When `--dev` is given and no explicit `--base-path`, this option is implied. #[arg(long, conflicts_with = "base_path")] pub tmp: bool, diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index bfa54a35058f..add7cb4f8505 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -48,6 +48,7 @@ pub struct ImportParams { pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. + /// /// Only has an effect when `wasm-execution` is set to `compiled`. /// The copy-on-write strategies are only supported on Linux. /// If the copy-on-write variant of a strategy is unsupported @@ -65,6 +66,7 @@ pub struct ImportParams { pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Specify the path where local WASM runtimes are stored. + /// /// These runtimes will override on-chain runtimes when the version matches. #[arg(long, value_name = "PATH")] pub wasm_runtime_overrides: Option, @@ -74,12 +76,12 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. + /// /// Providing `0` will disable the cache. #[arg(long, value_name = "Bytes", default_value_t = 67108864)] pub trie_cache_size: usize, - /// DEPRECATED - /// Switch to `--trie-cache-size`. + /// DEPRECATED: switch to `--trie-cache-size`. #[arg(long)] state_cache_size: Option, } @@ -115,26 +117,23 @@ impl ImportParams { /// Execution strategies parameters. #[derive(Debug, Clone, Args)] pub struct ExecutionStrategiesParams { - /// The means of execution used when calling into the runtime for importing blocks as - /// part of an initial sync. + /// Runtime execution strategy for importing blocks during initial sync. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_syncing: Option, - /// The means of execution used when calling into the runtime for general block import - /// (including locally authored blocks). + /// Runtime execution strategy for general block import (including locally authored blocks). 
#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_import_block: Option, - /// The means of execution used when calling into the runtime while constructing blocks. + /// Runtime execution strategy for constructing blocks. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_block_construction: Option, - /// The means of execution used when calling into the runtime while using an off-chain worker. + /// Runtime execution strategy for offchain workers. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_offchain_worker: Option, - /// The means of execution used when calling into the runtime while not syncing, importing or - /// constructing blocks. + /// Runtime execution strategy when not syncing, importing or constructing blocks. #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_other: Option, diff --git a/substrate/client/cli/src/params/keystore_params.rs b/substrate/client/cli/src/params/keystore_params.rs index 87210c3390ca..5a5d0f949995 100644 --- a/substrate/client/cli/src/params/keystore_params.rs +++ b/substrate/client/cli/src/params/keystore_params.rs @@ -39,8 +39,9 @@ pub struct KeystoreParams { #[arg(long, conflicts_with_all = &["password", "password_filename"])] pub password_interactive: bool, - /// Password used by the keystore. This allows appending an extra user-defined secret to the - /// seed. + /// Password used by the keystore. + /// + /// This allows appending an extra user-defined secret to the seed. #[arg( long, value_parser = secret_string_from_str, diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs index 84db218cc51d..12f19df2a685 100644 --- a/substrate/client/cli/src/params/network_params.rs +++ b/substrate/client/cli/src/params/network_params.rs @@ -42,6 +42,7 @@ pub struct NetworkParams { pub reserved_nodes: Vec, /// Whether to only synchronize the chain with reserved nodes. + /// /// Also disables automatic peer discovery. /// TCP connections might still be established with non-reserved nodes. /// In particular, if you are a validator your node might still connect to other @@ -50,7 +51,8 @@ pub struct NetworkParams { #[arg(long)] pub reserved_only: bool, - /// The public address that other nodes will use to connect to it. + /// Public address that other nodes will use to connect to this node. + /// /// This can be used if there's a proxy in front of this node. #[arg(long, value_name = "PUBLIC_ADDR", num_args = 1..)] pub public_addr: Vec, @@ -67,20 +69,28 @@ pub struct NetworkParams { #[arg(long, value_name = "PORT", conflicts_with_all = &[ "listen_addr" ])] pub port: Option, - /// Always forbid connecting to private IPv4/IPv6 addresses (as specified in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with - /// `--reserved-nodes` or `--bootnodes`. Enabled by default for chains marked as "live" in - /// their chain specifications. + /// Always forbid connecting to private IPv4/IPv6 addresses. + /// + /// The option doesn't apply to addresses passed with `--reserved-nodes` or + /// `--bootnodes`. Enabled by default for chains marked as "live" in their chain + /// specifications. + /// + /// Address allocation for private networks is specified by + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). 
#[arg(long, alias = "no-private-ipv4", conflicts_with_all = &["allow_private_ip"])] pub no_private_ip: bool, - /// Always accept connecting to private IPv4/IPv6 addresses (as specified in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Enabled by default for chains marked as - /// "local" in their chain specifications, or when `--dev` is passed. + /// Always accept connecting to private IPv4/IPv6 addresses. + /// + /// Enabled by default for chains marked as "local" in their chain specifications, + /// or when `--dev` is passed. + /// + /// Address allocation for private networks is specified by + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). #[arg(long, alias = "allow-private-ipv4", conflicts_with_all = &["no_private_ip"])] pub allow_private_ip: bool, - /// Specify the number of outgoing connections we're trying to maintain. + /// Number of outgoing connections we're trying to maintain. #[arg(long, value_name = "COUNT", default_value_t = 8)] pub out_peers: u32, @@ -92,15 +102,17 @@ pub struct NetworkParams { #[arg(long, value_name = "COUNT", default_value_t = 100)] pub in_peers_light: u32, - /// Disable mDNS discovery. + /// Disable mDNS discovery (default: true). + /// /// By default, the network will use mDNS to discover other nodes on the /// local network. This disables it. Automatically implied when using --dev. #[arg(long)] pub no_mdns: bool, /// Maximum number of peers from which to ask for the same blocks in parallel. - /// This allows downloading announced blocks from multiple peers. Decrease to save - /// traffic and risk increased latency. + /// + /// This allows downloading announced blocks from multiple peers. + /// Decrease to save traffic and risk increased latency. #[arg(long, value_name = "COUNT", default_value_t = 5)] pub max_parallel_downloads: u32, @@ -109,19 +121,24 @@ pub struct NetworkParams { pub node_key_params: NodeKeyParams, /// Enable peer discovery on local networks. + /// /// By default this option is `true` for `--dev` or when the chain type is /// `Local`/`Development` and false otherwise. #[arg(long)] pub discover_local: bool, - /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in - /// the presence of potentially adversarial nodes. + /// Require iterative Kademlia DHT queries to use disjoint paths. + /// + /// Disjoint paths increase resiliency in the presence of potentially adversarial nodes. + /// /// See the S/Kademlia paper for more information on the high level design as well as its /// security improvements. #[arg(long)] pub kademlia_disjoint_query_paths: bool, - /// Kademlia replication factor determines to how many closest peers a record is replicated to. + /// Kademlia replication factor. + /// + /// Determines to how many closest peers a record is replicated to. /// /// Discovery mechanism requires successful replication to all /// `kademlia_replication_factor` peers to consider record successfully put. diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index 8c5579eaec49..53f19f58e1fb 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -32,39 +32,49 @@ const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; /// used for libp2p networking. #[derive(Debug, Clone, Args)] pub struct NodeKeyParams { - /// The secret key to use for libp2p networking. + /// Secret key to use for p2p networking. 
+ /// /// The value is a string that is parsed according to the choice of /// `--node-key-type` as follows: - /// `ed25519`: - /// The value is parsed as a hex-encoded Ed25519 32 byte secret key, - /// i.e. 64 hex characters. + /// + /// - `ed25519`: the value is parsed as a hex-encoded Ed25519 32 byte secret key (64 hex + /// chars) + /// /// The value of this option takes precedence over `--node-key-file`. + /// /// WARNING: Secrets provided as command-line arguments are easily exposed. /// Use of this option should be limited to development and testing. To use /// an externally managed secret key, use `--node-key-file` instead. #[arg(long, value_name = "KEY")] pub node_key: Option, - /// The type of secret key to use for libp2p networking. + /// Crypto primitive to use for p2p networking. + /// /// The secret key of the node is obtained as follows: - /// * If the `--node-key` option is given, the value is parsed as a secret key according to - /// the type. See the documentation for `--node-key`. - /// * If the `--node-key-file` option is given, the secret key is read from the specified - /// file. See the documentation for `--node-key-file`. - /// * Otherwise, the secret key is read from a file with a predetermined, type-specific name - /// from the chain-specific network config directory inside the base directory specified by - /// `--base-dir`. If this file does not exist, it is created with a newly generated secret - /// key of the chosen type. + /// + /// - If the `--node-key` option is given, the value is parsed as a secret key according to the + /// type. See the documentation for `--node-key`. + /// + /// - If the `--node-key-file` option is given, the secret key is read from the specified file. + /// See the documentation for `--node-key-file`. + /// + /// - Otherwise, the secret key is read from a file with a predetermined, type-specific name + /// from the chain-specific network config directory inside the base directory specified by + /// `--base-dir`. If this file does not exist, it is created with a newly generated secret + /// key of the chosen type. + /// /// The node's secret key determines the corresponding public key and hence the /// node's peer ID in the context of libp2p. #[arg(long, value_name = "TYPE", value_enum, ignore_case = true, default_value_t = NodeKeyType::Ed25519)] pub node_key_type: NodeKeyType, - /// The file from which to read the node's secret key to use for libp2p networking. + /// File from which to read the node's secret key to use for p2p networking. + /// /// The contents of the file are parsed according to the choice of `--node-key-type` /// as follows: - /// `ed25519`: - /// The file must contain an unencoded 32 byte or hex encoded Ed25519 secret key. + /// + /// - `ed25519`: the file must contain an unencoded 32 byte or hex encoded Ed25519 secret key. + /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. #[arg(long, value_name = "FILE")] diff --git a/substrate/client/cli/src/params/offchain_worker_params.rs b/substrate/client/cli/src/params/offchain_worker_params.rs index d1fedab4cb2e..3583d85c00a7 100644 --- a/substrate/client/cli/src/params/offchain_worker_params.rs +++ b/substrate/client/cli/src/params/offchain_worker_params.rs @@ -32,8 +32,7 @@ use crate::{error, OffchainWorkerEnabled}; /// Offchain worker related parameters. #[derive(Debug, Clone, Args)] pub struct OffchainWorkerParams { - /// Should execute offchain workers on every block. 
- /// By default it's only enabled for nodes that are authoring new blocks. + /// Execute offchain workers on every block. #[arg( long = "offchain-worker", value_name = "ENABLED", @@ -43,8 +42,9 @@ pub struct OffchainWorkerParams { )] pub enabled: OffchainWorkerEnabled, - /// Enable Offchain Indexing API, which allows block import to write to Offchain DB. - /// Enables a runtime to write directly to a offchain workers DB during block import. + /// Enable offchain indexing API. + /// + /// Allows the runtime to write directly to offchain workers DB during block import. #[arg(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING", default_value_t = false, action = ArgAction::Set)] pub indexing_enabled: bool, } diff --git a/substrate/client/cli/src/params/prometheus_params.rs b/substrate/client/cli/src/params/prometheus_params.rs index 4d234ea33c20..69199ad5b260 100644 --- a/substrate/client/cli/src/params/prometheus_params.rs +++ b/substrate/client/cli/src/params/prometheus_params.rs @@ -27,10 +27,12 @@ pub struct PrometheusParams { #[arg(long, value_name = "PORT")] pub prometheus_port: Option, /// Expose Prometheus exporter on all interfaces. + /// /// Default is local. #[arg(long)] pub prometheus_external: bool, /// Do not expose a Prometheus exporter endpoint. + /// /// Prometheus metric endpoint is enabled by default. #[arg(long)] pub no_prometheus: bool, diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 1b5bf247d942..25b17b532898 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -24,6 +24,7 @@ use sc_service::{BlocksPruning, PruningMode}; #[derive(Debug, Clone, Args)] pub struct PruningParams { /// Specify the state pruning mode. + /// /// This mode specifies when the block's state (ie, storage) /// should be pruned (ie, removed) from the database. /// This setting can only be set on the first creation of the database. Every subsequent run @@ -38,6 +39,7 @@ pub struct PruningParams { pub state_pruning: Option, /// Specify the blocks pruning mode. + /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. /// Possible values: diff --git a/substrate/client/cli/src/params/runtime_params.rs b/substrate/client/cli/src/params/runtime_params.rs index 07009a96ee6e..a130d808418e 100644 --- a/substrate/client/cli/src/params/runtime_params.rs +++ b/substrate/client/cli/src/params/runtime_params.rs @@ -22,7 +22,9 @@ use std::str::FromStr; /// Parameters used to config runtime. #[derive(Debug, Clone, Args)] pub struct RuntimeParams { - /// The size of the instances cache for each runtime. The values higher than 32 are illegal. + /// The size of the instances cache for each runtime [max: 32]. + /// + /// Values higher than 32 are illegal. #[arg(long, default_value_t = 8, value_parser = parse_max_runtime_instances)] pub max_runtime_instances: usize, diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 3d20ca504a69..6419e15c62ab 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -25,12 +25,14 @@ use std::path::PathBuf; #[derive(Debug, Clone, Args)] pub struct SharedParams { /// Specify the chain specification. 
- /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file - /// with the chainspec (such as one exported by the `build-spec` subcommand). + /// + /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to + /// a file with the chainspec (such as one exported by the `build-spec` subcommand). #[arg(long, value_name = "CHAIN_SPEC")] pub chain: Option, /// Specify the development chain. + /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. #[arg(long, conflicts_with_all = &["chain"])] @@ -40,14 +42,23 @@ pub struct SharedParams { #[arg(long, short = 'd', value_name = "PATH")] pub base_path: Option, - /// Sets a custom logging filter. Syntax is `=`, e.g. -lsync=debug. - /// Log levels (least to most verbose) are error, warn, info, debug, and trace. + /// Sets a custom logging filter (syntax: `=`). + /// + /// Log levels (least to most verbose) are `error`, `warn`, `info`, `debug`, and `trace`. + /// /// By default, all targets log `info`. The global log level can be set with `-l`. + /// + /// Multiple `=` entries can be specified and separated by a comma. + /// + /// *Example*: `--log error,sync=debug,grandpa=warn`. + /// Sets Global log level to `error`, sets `sync` target to debug and grandpa target to `warn`. #[arg(short = 'l', long, value_name = "LOG_PATTERN", num_args = 1..)] pub log: Vec, /// Enable detailed log output. - /// This includes displaying the log target, log level and thread name. + /// + /// Includes displaying the log target, log level and thread name. + /// /// This is automatically enabled when something is logged with any higher level than `info`. #[arg(long)] pub detailed_log_output: bool, @@ -57,14 +68,18 @@ pub struct SharedParams { pub disable_log_color: bool, /// Enable feature to dynamically update and reload the log filter. + /// /// Be aware that enabling this feature can lead to a performance decrease up to factor six or /// more. Depending on the global logging level the performance decrease changes. + /// /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this /// option not being set. #[arg(long)] pub enable_log_reloading: bool, - /// Sets a custom profiling filter. Syntax is the same as for logging: `=`. + /// Sets a custom profiling filter. + /// + /// Syntax is the same as for logging (`--log`). #[arg(long, value_name = "TARGETS")] pub tracing_targets: Option, diff --git a/substrate/client/cli/src/params/telemetry_params.rs b/substrate/client/cli/src/params/telemetry_params.rs index 67f441071410..3b3d91e6b04e 100644 --- a/substrate/client/cli/src/params/telemetry_params.rs +++ b/substrate/client/cli/src/params/telemetry_params.rs @@ -22,14 +22,17 @@ use clap::Args; #[derive(Debug, Clone, Args)] pub struct TelemetryParams { /// Disable connecting to the Substrate telemetry server. + /// /// Telemetry is on by default on global chains. #[arg(long)] pub no_telemetry: bool, /// The URL of the telemetry server to connect to. + /// /// This flag can be passed multiple times as a means to specify multiple /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting /// the least verbosity. + /// /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. 
#[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)] pub telemetry_endpoints: Vec<(String, u8)>, diff --git a/substrate/client/cli/src/params/transaction_pool_params.rs b/substrate/client/cli/src/params/transaction_pool_params.rs index b2bf0b9b364c..48b2e5b1572b 100644 --- a/substrate/client/cli/src/params/transaction_pool_params.rs +++ b/substrate/client/cli/src/params/transaction_pool_params.rs @@ -30,7 +30,9 @@ pub struct TransactionPoolParams { #[arg(long, value_name = "COUNT", default_value_t = 20480)] pub pool_kbytes: usize, - /// How long a transaction is banned for, if it is considered invalid. Defaults to 1800s. + /// How long a transaction is banned for. + /// + /// If it is considered invalid. Defaults to 1800s. #[arg(long, value_name = "SECONDS")] pub tx_ban_seconds: Option, } diff --git a/substrate/client/storage-monitor/src/lib.rs b/substrate/client/storage-monitor/src/lib.rs index 655b940e8bed..b88b66d2d60d 100644 --- a/substrate/client/storage-monitor/src/lib.rs +++ b/substrate/client/storage-monitor/src/lib.rs @@ -42,9 +42,12 @@ pub enum Error { /// Parameters used to create the storage monitor. #[derive(Default, Debug, Clone, Args)] pub struct StorageMonitorParams { - /// Required available space on database storage. If available space for DB storage drops below - /// the given threshold, node will be gracefully terminated. If `0` is given monitoring will be - /// disabled. + /// Required available space on database storage. + /// + /// If available space for DB storage drops below the given threshold, node will + /// be gracefully terminated. + /// + /// If `0` is given monitoring will be disabled. #[arg(long = "db-storage-threshold", value_name = "MiB", default_value_t = 1024)] pub threshold: u64, From d715caa63a9dbb1b491ea3cd0a909208424abbab Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Mon, 30 Oct 2023 19:43:32 +1100 Subject: [PATCH 24/69] Improve `try-state` developer experience & fix bug (#2019) Making some devex improvements as I audit our chains adherence to try-state invariants, in preparation for automated try-state checks and alerting. Note to reviewer: while you're here, if you have time would be great to get your eyes on https://github.com/paritytech/polkadot-sdk/pull/1297 also since it touches a similar file and I'd like to avoid merge conflicts :P ## Devex Improvements - Changes the log level of logs informing the user that try-state checks are being run for a pallet from debug to info - Improves how errors are communicated - Errors are logged when they are encountered, rather than after everything has been executed - Exact pallet the error originated from is included with the error log - Clearly see all errors and how many there are, rather than only one - Closes #136 ### Example of new logs Screenshot 2023-10-25 at 15 44 44 ### Same but with old logs (run with RUST_LOG=debug) Notice only informed of one of the errors, and it's unclear which pallet it originated Screenshot 2023-10-25 at 15 39 01 ## Bug fix When dry-running migrations and `checks.try_state()` is `true`, only run `try_state` checks after migrations have been executed. Otherwise, `try_state` checks that expect state to be in at a HIGHER storage version than is on-chain could incorrectly fail. 
--------- Co-authored-by: command-bot <> --- substrate/frame/executive/src/lib.rs | 15 ++------ .../procedural/src/pallet/expand/hooks.rs | 36 +++++++++++-------- .../frame/support/src/traits/try_runtime.rs | 24 +++++++++++-- 3 files changed, 44 insertions(+), 31 deletions(-) diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index a4b12c6d31db..1ca9629fd420 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -349,23 +349,12 @@ where Ok(frame_system::Pallet::::block_weight().total()) } - /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. + /// Execute all `OnRuntimeUpgrade` of this runtime. /// - /// Runs the try-state code both before and after the migration function if `checks` is set to - /// `true`. Also, if set to `true`, it runs the `pre_upgrade` and `post_upgrade` hooks. + /// The `checks` param determines whether to execute `pre/post_upgrade` and `try_state` hooks. pub fn try_runtime_upgrade( checks: frame_try_runtime::UpgradeCheckSelect, ) -> Result { - if checks.try_state() { - let _guard = frame_support::StorageNoopGuard::default(); - , - >>::try_state( - frame_system::Pallet::::block_number(), - frame_try_runtime::TryStateSelect::All, - )?; - } - let weight = <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::try_on_runtime_upgrade( checks.pre_and_post(), diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 2825756f270f..aaad4dd2be0e 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -58,19 +58,6 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } }; - let log_try_state = quote::quote! { - let pallet_name = < - ::PalletInfo - as - #frame_support::traits::PalletInfo - >::name::().expect("No name found for the pallet! This usually means that the pallet wasn't added to `construct_runtime!`."); - #frame_support::__private::log::debug!( - target: #frame_support::LOG_TARGET, - "🩺 try-state pallet {:?}", - pallet_name, - ); - }; - let hooks_impl = if def.hooks.is_none() { let frame_system = &def.frame_system; quote::quote! { @@ -271,12 +258,31 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { n: #frame_system::pallet_prelude::BlockNumberFor::, _s: #frame_support::traits::TryStateSelect ) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { - #log_try_state + let pallet_name = < + ::PalletInfo + as + #frame_support::traits::PalletInfo + >::name::().expect("No name found for the pallet! 
This usually means that the pallet wasn't added to `construct_runtime!`."); + + #frame_support::__private::log::info!( + target: #frame_support::LOG_TARGET, + "🩺 Running {:?} try-state checks", + pallet_name, + ); < Self as #frame_support::traits::Hooks< #frame_system::pallet_prelude::BlockNumberFor:: > - >::try_state(n) + >::try_state(n).map_err(|err| { + #frame_support::__private::log::error!( + target: #frame_support::LOG_TARGET, + "❌ {:?} try_state checks failed: {:?}", + pallet_name, + err + ); + + err + }) } } ) diff --git a/substrate/frame/support/src/traits/try_runtime.rs b/substrate/frame/support/src/traits/try_runtime.rs index 31aebeeb4d99..e7a1fe109fc2 100644 --- a/substrate/frame/support/src/traits/try_runtime.rs +++ b/substrate/frame/support/src/traits/try_runtime.rs @@ -144,9 +144,27 @@ impl TryState Ok(()), Select::All => { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::try_state(n.clone(), targets.clone())); )* ); - result + let mut error_count = 0; + for_tuples!(#( + if let Err(_) = Tuple::try_state(n.clone(), targets.clone()) { + error_count += 1; + } + )*); + + if error_count > 0 { + log::error!( + target: "try-runtime", + "{} pallets exited with errors while executing try_state checks.", + error_count + ); + + return Err( + "Detected errors while executing try_state checks. See logs for more info." + .into(), + ) + } + + Ok(()) }, Select::RoundRobin(len) => { let functions: &[fn(BlockNumber, Select) -> Result<(), TryRuntimeError>] = From 0aeab381380107a6462fea240181f6e2a78f21b8 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Mon, 30 Oct 2023 19:48:30 +1100 Subject: [PATCH 25/69] Stop `Balances` pallet erroneously double incrementing and decrementing consumers (#1976) Closes https://github.com/paritytech/polkadot-sdk/issues/1970 Follow up issue to tackle, once the erroneous double incrementing/decrementing has stopped: https://github.com/paritytech/polkadot-sdk/issues/2037 --- substrate/frame/balances/src/lib.rs | 27 ++++--------------- .../balances/src/tests/currency_tests.rs | 24 ++++++++++++++++- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index c6a2252df610..d518f933df8d 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -935,8 +935,8 @@ pub mod pallet { if did_provide && !does_provide { // This could reap the account so must go last. frame_system::Pallet::::dec_providers(who).map_err(|r| { + // best-effort revert consumer change. if did_consume && !does_consume { - // best-effort revert consumer change. let _ = frame_system::Pallet::::inc_consumers(who).defensive(); } if !did_consume && does_consume { @@ -1006,8 +1006,8 @@ pub mod pallet { let freezes = Freezes::::get(who); let mut prev_frozen = Zero::zero(); let mut after_frozen = Zero::zero(); - // TODO: Revisit this assumption. We no manipulate consumer/provider refs. // No way this can fail since we do not alter the existential balances. + // TODO: Revisit this assumption. 
let res = Self::mutate_account(who, |b| { prev_frozen = b.frozen; b.frozen = Zero::zero(); @@ -1024,26 +1024,9 @@ pub mod pallet { debug_assert!(maybe_dust.is_none(), "Not altering main balance; qed"); } - let existed = Locks::::contains_key(who); - if locks.is_empty() { - Locks::::remove(who); - if existed { - // TODO: use Locks::::hashed_key - // https://github.com/paritytech/substrate/issues/4969 - system::Pallet::::dec_consumers(who); - } - } else { - Locks::::insert(who, bounded_locks); - if !existed && system::Pallet::::inc_consumers_without_limit(who).is_err() { - // No providers for the locks. This is impossible under normal circumstances - // since the funds that are under the lock will themselves be stored in the - // account and therefore will need a reference. - log::warn!( - target: LOG_TARGET, - "Warning: Attempt to introduce lock consumer reference, yet no providers. \ - This is unexpected but should be safe." - ); - } + match locks.is_empty() { + true => Locks::::remove(who), + false => Locks::::insert(who, bounded_locks), } if prev_frozen > after_frozen { diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 2449638788dd..200df9ae743c 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -144,7 +144,9 @@ fn lock_removal_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); Balances::remove_lock(ID_1, &1); + assert_eq!(System::consumers(&1), 0); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -156,7 +158,9 @@ fn lock_replacement_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -168,7 +172,9 @@ fn double_locking_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); Balances::set_lock(ID_2, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -179,8 +185,11 @@ fn combination_locking_should_work() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { + assert_eq!(System::consumers(&1), 0); Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); + assert_eq!(System::consumers(&1), 0); Balances::set_lock(ID_2, &1, 0, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 0); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); } @@ -192,16 +201,19 @@ fn lock_value_extension_should_work() { .monied(true) .build_and_execute_with(|| { Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), TokenError::Frozen ); Balances::extend_lock(ID_1, &1, 2, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), TokenError::Frozen ); Balances::extend_lock(ID_1, &1, 8, WithdrawReasons::all()); + assert_eq!(System::consumers(&1), 1); assert_noop!( >::transfer(&1, &2, 3, AllowDeath), TokenError::Frozen @@ -1324,9 +1336,14 @@ fn freezing_and_locking_should_work() { .existential_deposit(1) .monied(true) 
.build_and_execute_with(|| { + // Consumer is shared between freezing and locking. + assert_eq!(System::consumers(&1), 0); assert_ok!(>::set_freeze(&TestId::Foo, &1, 4)); + assert_eq!(System::consumers(&1), 1); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); - assert_eq!(System::consumers(&1), 2); + assert_eq!(System::consumers(&1), 1); + + // Frozen and locked balances update correctly. assert_eq!(Balances::account(&1).frozen, 5); assert_ok!(>::set_freeze(&TestId::Foo, &1, 6)); assert_eq!(Balances::account(&1).frozen, 6); @@ -1336,8 +1353,13 @@ fn freezing_and_locking_should_work() { assert_eq!(Balances::account(&1).frozen, 4); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); assert_eq!(Balances::account(&1).frozen, 5); + + // Locks update correctly. Balances::remove_lock(ID_1, &1); assert_eq!(Balances::account(&1).frozen, 4); assert_eq!(System::consumers(&1), 1); + assert_ok!(>::set_freeze(&TestId::Foo, &1, 0)); + assert_eq!(Balances::account(&1).frozen, 0); + assert_eq!(System::consumers(&1), 0); }); } From ad5163ba93a7e142fd6fb2b9ed975bff9c29c479 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Mon, 30 Oct 2023 21:55:05 +1100 Subject: [PATCH 26/69] contracts migration: remove unnecessary panics (#2079) Runtime migration CI is currently failing (https://gitlab.parity.io/parity/mirrors/polkadot-sdk/builds/4122083) for the contracts testnet due to unnecessary panicking in a `pre_upgrade` hook. Soon idempotency will be enforced https://github.com/paritytech/try-runtime-cli/issues/42; in the meantime we need to manually fix these issues as they arise. --- also removes backticks from the string in `echo`, which caused a 'command not found' error in CI output --- .gitlab/pipeline/check.yml | 2 +- substrate/frame/contracts/src/migration.rs | 37 +++++++++++----------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index cd26003d88c1..ce3a61557920 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -107,7 +107,7 @@ check-rust-feature-propagation: echo "---------- Building ${PACKAGE} runtime ----------" time cargo build --release --locked -p "$PACKAGE" --features try-runtime - echo "---------- Executing `on-runtime-upgrade` for ${NETWORK} ----------" + echo "---------- Executing on-runtime-upgrade for ${NETWORK} ----------" time ./try-runtime \ --runtime ./target/release/wbuild/"$PACKAGE"/"$WASM" \ on-runtime-upgrade --checks=pre-and-post ${EXTRA_ARGS} live --uri ${URI} diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 271462073120..1873ef2765b1 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -263,14 +263,14 @@ impl Migration { impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { let name = >::name(); - let latest_version = >::current_storage_version(); - let storage_version = >::on_chain_storage_version(); + let current_version = >::current_storage_version(); + let on_chain_version = >::on_chain_storage_version(); - if storage_version == latest_version { + if on_chain_version == current_version { log::warn!( target: LOG_TARGET, "{name}: No Migration performed storage_version = latest_version = {:?}", - &storage_version + &on_chain_version ); return T::WeightInfo::on_runtime_upgrade_noop() } @@ -281,7 +281,7 @@ impl OnRuntimeUpgrade for Migration OnRuntimeUpgrade for Migration::set(Some(cursor)); #[cfg(feature = "try-runtime")]
impl OnRuntimeUpgrade for Migration>::on_chain_storage_version(); - let target_version = >::current_storage_version(); + let on_chain_version = >::on_chain_storage_version(); + let current_version = >::current_storage_version(); - ensure!( - storage_version != target_version, - "No upgrade: Please remove this migration from your runtime upgrade configuration." - ); + if on_chain_version == current_version { + log::warn!( + target: LOG_TARGET, + "No upgrade: Please remove this migration from your Migrations tuple" + ) + } log::debug!( target: LOG_TARGET, "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", - >::name(), storage_version, target_version + >::name(), on_chain_version, current_version ); - ensure!( - T::Migrations::is_upgrade_supported(storage_version, target_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" - ); + if !T::Migrations::is_upgrade_supported(on_chain_version, current_version) { + log::warn!(target: LOG_TARGET, "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)") + } Ok(Default::default()) } From 37552fd5ca94bfb72ff09e6c1a5045a039b6371d Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 30 Oct 2023 13:24:22 +0200 Subject: [PATCH 27/69] Update polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs Co-authored-by: Francisco Aguirre --- polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs index 77b326052dcc..a35c6555c96c 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -74,7 +74,8 @@ pub trait AssetTransferSupport { if asset_location == MultiLocation::here() || Self::IsTeleporter::contains(asset, &asset_location) { - // if local asset, or remote location that allows local teleports => local reserve + // if the asset is local, then it's a local reserve + // it's also a local reserve if the asset's location is not `here` but it's a location where it can be teleported to `here` => local reserve Ok(TransferType::LocalReserve) } else if Self::IsReserve::contains(asset, &asset_location) { // remote location that is recognized as reserve location for asset From 6714d39146e80c8d8559e12b8af57fef1ef4f38f Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 30 Oct 2023 13:25:55 +0200 Subject: [PATCH 28/69] Update polkadot/xcm/pallet-xcm/src/lib.rs Co-authored-by: Francisco Aguirre --- polkadot/xcm/pallet-xcm/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 186dd2d51197..fcd51fd7b0f2 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -1484,7 +1484,7 @@ impl Pallet { ClearOrigin, // buy exec using `fees` in holding deposited in top instruction here BuyExecution { fees: reanchored_fees, weight_limit }, - // deposit all assets in holding to `beneficiary` account(s) + // deposit all assets in holding to `beneficiary` location DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, ] .into_iter(), From 7a61129cb91a05d7aacb4f378151aa6c303a15e6 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 30 Oct 2023 13:31:38 +0200 Subject: [PATCH 29/69] fmt --- 
polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs index a35c6555c96c..980554f2dce2 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -75,7 +75,8 @@ pub trait AssetTransferSupport { Self::IsTeleporter::contains(asset, &asset_location) { // if the asset is local, then it's a local reserve - // it's also a local reserve if the asset's location is not `here` but it's a location where it can be teleported to `here` => local reserve + // it's also a local reserve if the asset's location is not `here` but it's a location + // where it can be teleported to `here` => local reserve Ok(TransferType::LocalReserve) } else if Self::IsReserve::contains(asset, &asset_location) { // remote location that is recognized as reserve location for asset From 30f3ad2eefce06fc3a1a063b57af22e9d75bb903 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 30 Oct 2023 15:15:36 +0200 Subject: [PATCH 30/69] Refactor transaction storage pallet to use fungible traits (#1800) Partial https://github.com/paritytech/polkadot-sdk/issues/226 `frame/transaction-storage`: replace `Currency` with `fungible::*` traits --------- Signed-off-by: Adrian Catangiu Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> --- substrate/bin/node/runtime/src/lib.rs | 3 +- .../transaction-storage/src/benchmarking.rs | 16 +++--- .../frame/transaction-storage/src/lib.rs | 40 +++++++++----- .../frame/transaction-storage/src/mock.rs | 52 +++++-------------- .../frame/transaction-storage/src/tests.rs | 3 +- 5 files changed, 54 insertions(+), 60 deletions(-) diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index a2d100e1f8b5..f3c248976325 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -526,7 +526,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; - type MaxHolds = ConstU32<5>; + type MaxHolds = ConstU32<6>; } parameter_types! 
{ @@ -1833,6 +1833,7 @@ impl pallet_nfts::Config for Runtime { impl pallet_transaction_storage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; + type RuntimeHoldReason = RuntimeHoldReason; type RuntimeCall = RuntimeCall; type FeeDestination = (); type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; diff --git a/substrate/frame/transaction-storage/src/benchmarking.rs b/substrate/frame/transaction-storage/src/benchmarking.rs index fdbaeb1f9518..8d485d9f3cac 100644 --- a/substrate/frame/transaction-storage/src/benchmarking.rs +++ b/substrate/frame/transaction-storage/src/benchmarking.rs @@ -21,9 +21,9 @@ use super::*; use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; -use frame_support::traits::{Currency, Get, OnFinalize, OnInitialize}; +use frame_support::traits::{Get, OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, Pallet as System, RawOrigin}; -use sp_runtime::traits::{Bounded, One, Zero}; +use sp_runtime::traits::{Bounded, CheckedDiv, One, Zero}; use sp_std::*; use sp_transaction_storage_proof::TransactionStorageProof; @@ -103,9 +103,6 @@ fn proof() -> Vec { array_bytes::hex2bytes_unchecked(PROOF) } -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - fn assert_last_event(generic_event: ::RuntimeEvent) { let events = System::::events(); let system_event: ::RuntimeEvent = generic_event.into(); @@ -129,7 +126,8 @@ benchmarks! { store { let l in 1 .. T::MaxTransactionSize::get(); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); + T::Currency::set_balance(&caller, initial_balance); }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) verify { assert!(!BlockTransactions::::get().is_empty()); @@ -138,7 +136,8 @@ benchmarks! { renew { let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); + T::Currency::set_balance(&caller, initial_balance); TransactionStorage::::store( RawOrigin::Signed(caller.clone()).into(), vec![0u8; T::MaxTransactionSize::get() as usize], @@ -152,7 +151,8 @@ benchmarks! { check_proof_max { run_to_block::(1u32.into()); let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); + T::Currency::set_balance(&caller, initial_balance); for _ in 0 .. 
T::MaxBlockTransactions::get() { TransactionStorage::::store( RawOrigin::Signed(caller.clone()).into(), diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index 753f5ca0c7b5..fb8ada0f5f99 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -31,7 +31,14 @@ mod tests; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::GetDispatchInfo, - traits::{Currency, OnUnbalanced, ReservableCurrency}, + traits::{ + fungible::{ + hold::Balanced as FnBalanced, Inspect as FnInspect, Mutate as FnMutate, + MutateHold as FnMutateHold, + }, + tokens::fungible::Credit, + OnUnbalanced, + }, }; use sp_runtime::traits::{BlakeTwo256, Dispatchable, Hash, One, Saturating, Zero}; use sp_std::{prelude::*, result}; @@ -42,10 +49,8 @@ use sp_transaction_storage_proof::{ /// A type alias for the balance type from this pallet's point of view. type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency< - ::AccountId, ->>::NegativeImbalance; + <::Currency as FnInspect<::AccountId>>::Balance; +pub type CreditOf = Credit<::AccountId, ::Currency>; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; @@ -89,6 +94,13 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + /// A reason for this pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The funds are held as deposit for the used storage. + StorageFeeHold, + } + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. @@ -98,10 +110,14 @@ pub mod pallet { + Dispatchable + GetDispatchInfo + From>; - /// The currency trait. - type Currency: ReservableCurrency; + /// The fungible type for this pallet. + type Currency: FnMutate + + FnMutateHold + + FnBalanced; + /// The overarching runtime hold reason. + type RuntimeHoldReason: From; /// Handler for the unbalanced decrease when fees are burned. - type FeeDestination: OnUnbalanced>; + type FeeDestination: OnUnbalanced>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// Maximum number of indexed transactions in the block. @@ -112,8 +128,6 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Insufficient account balance. - InsufficientFunds, /// Invalid configuration. NotConfigured, /// Renewed extrinsic is not found. 
@@ -432,8 +446,10 @@ pub mod pallet { let byte_fee = ByteFee::::get().ok_or(Error::::NotConfigured)?; let entry_fee = EntryFee::::get().ok_or(Error::::NotConfigured)?; let fee = byte_fee.saturating_mul(size.into()).saturating_add(entry_fee); - ensure!(T::Currency::can_slash(&sender, fee), Error::::InsufficientFunds); - let (credit, _) = T::Currency::slash(&sender, fee); + T::Currency::hold(&HoldReason::StorageFeeHold.into(), &sender, fee)?; + let (credit, _remainder) = + T::Currency::slash(&HoldReason::StorageFeeHold.into(), &sender, fee); + debug_assert!(_remainder.is_zero()); T::FeeDestination::on_unbalanced(credit); Ok(()) } diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs index 947c81c12acf..a8da19a382df 100644 --- a/substrate/frame/transaction-storage/src/mock.rs +++ b/substrate/frame/transaction-storage/src/mock.rs @@ -21,12 +21,11 @@ use crate::{ self as pallet_transaction_storage, TransactionStorageProof, DEFAULT_MAX_BLOCK_TRANSACTIONS, DEFAULT_MAX_TRANSACTION_SIZE, }; -use frame_support::traits::{ConstU16, ConstU32, ConstU64, OnFinalize, OnInitialize}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, }; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; pub type Block = frame_system::mocking::MockBlock; @@ -37,58 +36,35 @@ frame_support::construct_runtime!( System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, TransactionStorage: pallet_transaction_storage::{ - Pallet, Call, Storage, Config, Inherent, Event + Pallet, Call, Storage, Config, Inherent, Event, HoldReason }, } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type DbWeight = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; + type AccountId = u64; + type BlockHashCount = ConstU64<250>; + type Lookup = IdentityLookup; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for Test { type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type MaxHolds = (); + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxHolds = ConstU32<128>; } impl pallet_transaction_storage::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; type Currency = Balances; + type 
RuntimeHoldReason = RuntimeHoldReason; type FeeDestination = (); type WeightInfo = (); type MaxBlockTransactions = ConstU32<{ DEFAULT_MAX_BLOCK_TRANSACTIONS }>; diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index 43dfed81f88b..e17b3ca3bebd 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -21,6 +21,7 @@ use super::{Pallet as TransactionStorage, *}; use crate::mock::*; use frame_support::{assert_noop, assert_ok}; use frame_system::RawOrigin; +use sp_runtime::{DispatchError, TokenError::FundsUnavailable}; use sp_transaction_storage_proof::registration::build_proof; const MAX_DATA_SIZE: u32 = DEFAULT_MAX_TRANSACTION_SIZE; @@ -71,7 +72,7 @@ fn burns_fee() { RawOrigin::Signed(5).into(), vec![0u8; 2000 as usize] ), - Error::::InsufficientFunds, + DispatchError::Token(FundsUnavailable), ); assert_ok!(TransactionStorage::::store( RawOrigin::Signed(caller).into(), From a69da4a85f73e54a21b9cc80526d8222e234311c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 30 Oct 2023 17:03:30 +0100 Subject: [PATCH 31/69] Switch from `tiny-bip39` to `bip39` crate (#2084) Switch from: https://crates.io/crates/tiny-bip39 to: https://crates.io/crates/bip39 Required for: https://github.com/paritytech/polkadot-sdk/pull/2044 --- Cargo.lock | 38 ++++--------------- substrate/client/cli/Cargo.toml | 3 +- substrate/client/cli/src/commands/generate.rs | 23 ++++++----- substrate/primitives/core/Cargo.toml | 7 +++- substrate/primitives/core/src/crypto.rs | 16 +++++--- 5 files changed, 38 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1183f9204a2d..c95cc70b0da4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1377,6 +1377,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ "bitcoin_hashes", + "rand 0.7.3", + "rand_core 0.5.1", + "serde", + "unicode-normalization", ] [[package]] @@ -11407,15 +11411,6 @@ dependencies = [ "crypto-mac 0.11.1", ] -[[package]] -name = "pbkdf2" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "pbkdf2" version = "0.12.2" @@ -14732,11 +14727,13 @@ name = "sc-cli" version = "0.10.0-dev" dependencies = [ "array-bytes 6.1.0", + "bip39", "chrono", "clap 4.4.6", "fdlimit", "futures", "futures-timer", + "itertools 0.10.5", "libp2p-identity", "log", "names 0.13.0", @@ -14765,7 +14762,6 @@ dependencies = [ "sp-version", "tempfile", "thiserror", - "tiny-bip39", "tokio", ] @@ -17076,6 +17072,7 @@ version = "21.0.0" dependencies = [ "array-bytes 6.1.0", "bandersnatch_vrfs", + "bip39", "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", @@ -17087,6 +17084,7 @@ dependencies = [ "hash-db", "hash256-std-hasher", "impl-serde", + "itertools 0.10.5", "lazy_static", "libsecp256k1", "log", @@ -17113,7 +17111,6 @@ dependencies = [ "ss58-registry", "substrate-bip39", "thiserror", - "tiny-bip39", "tracing", "w3f-bls", "zeroize", @@ -18707,25 +18704,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tiny-bip39" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" -dependencies = [ - "anyhow", - 
"hmac 0.12.1", - "once_cell", - "pbkdf2 0.11.0", - "rand 0.8.5", - "rustc-hash", - "sha2 0.10.7", - "thiserror", - "unicode-normalization", - "wasm-bindgen", - "zeroize", -] - [[package]] name = "tiny-keccak" version = "2.0.2" diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index dc53ed54d967..5a98b43f43c2 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -18,6 +18,7 @@ chrono = "0.4.27" clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } fdlimit = "0.2.1" futures = "0.3.21" +itertools = "0.10.3" libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} log = "0.4.17" names = { version = "0.13.0", default-features = false } @@ -28,7 +29,7 @@ rpassword = "7.0.0" serde = "1.0.188" serde_json = "1.0.107" thiserror = "1.0.48" -tiny-bip39 = "1.0.0" +bip39 = "2.0.0" tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_lot"] } sc-client-api = { path = "../api" } sc-client-db = { path = "../db", default-features = false} diff --git a/substrate/client/cli/src/commands/generate.rs b/substrate/client/cli/src/commands/generate.rs index 93b83fcbef51..c465bcc85a47 100644 --- a/substrate/client/cli/src/commands/generate.rs +++ b/substrate/client/cli/src/commands/generate.rs @@ -20,8 +20,9 @@ use crate::{ utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, NetworkSchemeFlag, OutputTypeFlag, }; -use bip39::{Language, Mnemonic, MnemonicType}; +use bip39::Mnemonic; use clap::Parser; +use itertools::Itertools; /// The `generate` command #[derive(Debug, Clone, Parser)] @@ -52,20 +53,22 @@ impl GenerateCmd { /// Run the command pub fn run(&self) -> Result<(), Error> { let words = match self.words { - Some(words) => MnemonicType::for_word_count(words).map_err(|_| { - Error::Input( - "Invalid number of words given for phrase: must be 12/15/18/21/24".into(), - ) - })?, - None => MnemonicType::Words12, - }; - let mnemonic = Mnemonic::new(words, Language::English); + Some(words_count) if [12, 15, 18, 21, 24].contains(&words_count) => Ok(words_count), + Some(_) => Err(Error::Input( + "Invalid number of words given for phrase: must be 12/15/18/21/24".into(), + )), + None => Ok(12), + }?; + let mnemonic = Mnemonic::generate(words) + .map_err(|e| Error::Input(format!("Mnemonic generation failed: {e}").into()))?; let password = self.keystore_params.read_password()?; let output = self.output_scheme.output_type; + let phrase = mnemonic.word_iter().join(" "); + with_crypto_scheme!( self.crypto_scheme.scheme, - print_from_uri(mnemonic.phrase(), password, self.network_scheme.network, output) + print_from_uri(&phrase, password, self.network_scheme.network, output) ); Ok(()) } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 1b6b10eeeedf..1e8a353f419b 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -25,7 +25,7 @@ hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } -tiny-bip39 = { version = "1.0.0", optional = true } +bip39 = { version = "2.0.0", default-features = false } regex = { version = "1.6.0", optional = true } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } @@ -42,6 +42,7 @@ thiserror 
= { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } bitflags = "1.3" paste = "1.0.7" +itertools = { version = "0.10.3", optional = true } # full crypto array-bytes = { version = "6.1", optional = true } @@ -76,6 +77,8 @@ default = [ "std" ] std = [ "array-bytes", "bandersnatch_vrfs/getrandom", + "bip39/rand", + "bip39/std", "blake2/std", "bounded-collections/std", "bs58/std", @@ -88,6 +91,7 @@ std = [ "hash-db/std", "hash256-std-hasher/std", "impl-serde/std", + "itertools", "lazy_static", "libsecp256k1/std", "log/std", @@ -114,7 +118,6 @@ std = [ "ss58-registry/std", "substrate-bip39", "thiserror", - "tiny-bip39", "tracing", "w3f-bls?/std", "zeroize/alloc", diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index f7e2c56ca9a4..d369de5a1c01 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -19,9 +19,11 @@ use crate::{ed25519, sr25519}; #[cfg(feature = "std")] -use bip39::{Language, Mnemonic, MnemonicType}; +use bip39::{Language, Mnemonic}; use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] +use itertools::Itertools; +#[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; #[cfg(feature = "std")] use regex::Regex; @@ -870,9 +872,9 @@ pub trait Pair: CryptoType + Sized { /// the key from the current session. #[cfg(feature = "std")] fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) + let mnemonic = Mnemonic::generate(12).expect("Mnemonic generation always works; qed"); + let phrase = mnemonic.word_iter().join(" "); + let (pair, seed) = Self::from_phrase(&phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); (pair, phrase.to_owned(), seed) } @@ -883,10 +885,12 @@ pub trait Pair: CryptoType + Sized { phrase: &str, password: Option<&str>, ) -> Result<(Self, Self::Seed), SecretStringError> { - let mnemonic = Mnemonic::from_phrase(phrase, Language::English) + let mnemonic = Mnemonic::parse_in(Language::English, phrase) .map_err(|_| SecretStringError::InvalidPhrase)?; + + let (entropy, entropy_len) = mnemonic.to_entropy_array(); let big_seed = - substrate_bip39::seed_from_entropy(mnemonic.entropy(), password.unwrap_or("")) + substrate_bip39::seed_from_entropy(&entropy[0..entropy_len], password.unwrap_or("")) .map_err(|_| SecretStringError::InvalidSeed)?; let mut seed = Self::Seed::default(); let seed_slice = seed.as_mut(); From 40ff09b8fb6a9f669b9d7da3ef4cd447ec2e1c39 Mon Sep 17 00:00:00 2001 From: yjh Date: Tue, 31 Oct 2023 00:05:27 +0800 Subject: [PATCH 32/69] pub `keystore_accounts/accounts_from_keys` for offchain Signer (#2051) --- substrate/frame/system/src/offchain.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index dd4ac6782a55..a019cfd666e8 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -154,8 +154,8 @@ impl, X> Signer /// all available accounts and the provided accounts /// in `with_filter`. If no accounts are provided, /// use all accounts by default. 
- fn accounts_from_keys<'a>(&'a self) -> Box> + 'a> { - let keystore_accounts = self.keystore_accounts(); + pub fn accounts_from_keys<'a>(&'a self) -> Box> + 'a> { + let keystore_accounts = Self::keystore_accounts(); match self.accounts { None => Box::new(keystore_accounts), Some(ref keys) => { @@ -175,7 +175,8 @@ impl, X> Signer } } - fn keystore_accounts(&self) -> impl Iterator> { + /// Return all available accounts in keystore. + pub fn keystore_accounts() -> impl Iterator> { C::RuntimeAppPublic::all().into_iter().enumerate().map(|(index, key)| { let generic_public = C::GenericPublic::from(key); let public: T::Public = generic_public.into(); From 2d9426f1cc144a0624ea0329ddc7e567bb47d6b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Oct 2023 17:37:10 +0100 Subject: [PATCH 33/69] parachain-system: Send same event & digest as a standalone chain (#2064) This ensures that upgrading a parachain code sends the same event & digest as when using `set_code` on a standalone chain. Close: https://github.com/paritytech/polkadot-sdk/issues/2049 --- cumulus/pallets/parachain-system/src/lib.rs | 9 +-------- cumulus/pallets/parachain-system/src/tests.rs | 12 ++++++++++-- substrate/frame/system/src/lib.rs | 5 ++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 369281ccd8ee..84b4cda15347 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -39,7 +39,6 @@ use frame_support::{ dispatch::{DispatchResult, Pays, PostDispatchInfo}, ensure, inherent::{InherentData, InherentIdentifier, ProvideInherent}, - storage, traits::Get, weights::Weight, }; @@ -598,7 +597,7 @@ pub mod pallet { ); let validation_code = >::take(); - Self::put_parachain_code(&validation_code); + frame_system::Pallet::::update_code_in_storage(&validation_code); ::on_validation_code_applied(); Self::deposit_event(Event::ValidationFunctionApplied { relay_chain_block_num: vfp.relay_parent_number, @@ -1399,12 +1398,6 @@ impl Pallet { >::put(true); } - /// Put a new validation function into a particular location where this - /// parachain will execute it on subsequent blocks. - fn put_parachain_code(code: &[u8]) { - storage::unhashed::put_raw(sp_core::storage::well_known_keys::CODE, code); - } - /// The maximum code size permitted, in bytes. /// /// Returns `None` if the relay chain parachain host configuration hasn't been submitted yet. 
diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 3f5b4f649e32..7db6a966ec91 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -869,7 +869,7 @@ fn hrmp_outbound_respects_used_bandwidth() { } #[test] -fn events() { +fn runtime_upgrade_events() { BlockTests::new() .with_relay_sproof_builder(|_, block_number, builder| { if block_number > 123 { @@ -894,12 +894,20 @@ fn events() { || {}, || { let events = System::events(); + + assert_eq!(events[0].event, RuntimeEvent::System(frame_system::Event::CodeUpdated)); + assert_eq!( - events[0].event, + events[1].event, RuntimeEvent::ParachainSystem(crate::Event::ValidationFunctionApplied { relay_chain_block_num: 1234 }) ); + + assert!(System::digest() + .logs() + .iter() + .any(|d| *d == sp_runtime::generic::DigestItem::RuntimeEnvironmentUpdated)); }, ); } diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index eca20f5a0a9f..dfdacc9a8eb9 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -163,7 +163,7 @@ pub trait SetCode { impl SetCode for () { fn set_code(code: Vec) -> DispatchResult { - >::update_code_in_storage(&code)?; + >::update_code_in_storage(&code); Ok(()) } } @@ -1106,11 +1106,10 @@ impl Pallet { /// Note this function almost never should be used directly. It is exposed /// for `OnSetCode` implementations that defer actual code being written to /// the storage (for instance in case of parachains). - pub fn update_code_in_storage(code: &[u8]) -> DispatchResult { + pub fn update_code_in_storage(code: &[u8]) { storage::unhashed::put_raw(well_known_keys::CODE, code); Self::deposit_log(generic::DigestItem::RuntimeEnvironmentUpdated); Self::deposit_event(Event::CodeUpdated); - Ok(()) } /// Increment the reference counter on an account. 
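For downstream `SetCode`/`OnSetCode` implementations, the takeaway of this patch is that deferred code writes should now go through the infallible `frame_system::Pallet::<T>::update_code_in_storage`, which performs the raw storage write and emits both the `CodeUpdated` event and the `RuntimeEnvironmentUpdated` digest. Below is a minimal sketch of such a handler under the post-patch API; `DeferredSetCode` is a hypothetical type, not part of this patch:

```rust
use core::marker::PhantomData;
use frame_support::dispatch::DispatchResult;
use sp_std::vec::Vec;

/// Hypothetical handler that defers the code write to frame-system so the same
/// event and digest are produced as on a standalone chain.
pub struct DeferredSetCode<T>(PhantomData<T>);

impl<T: frame_system::Config> frame_system::SetCode<T> for DeferredSetCode<T> {
	fn set_code(code: Vec<u8>) -> DispatchResult {
		// After this patch, `update_code_in_storage` is infallible: it writes the
		// raw `:code` key, deposits the `RuntimeEnvironmentUpdated` digest, and
		// emits `frame_system::Event::CodeUpdated` in one place.
		frame_system::Pallet::<T>::update_code_in_storage(&code);
		Ok(())
	}
}
```

This mirrors what `cumulus-pallet-parachain-system` now does in place of its removed private `put_parachain_code` helper.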
From 9faea380dce6db9aabba29cb01328df229764863 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 31 Oct 2023 11:08:08 +0100 Subject: [PATCH 34/69] PVF worker: Add seccomp restrictions (restrict networking) (#2009) --- Cargo.lock | 63 ++- .../node/core/candidate-validation/src/lib.rs | 3 +- polkadot/node/core/pvf/Cargo.toml | 5 + .../benches/host_prepare_rococo_runtime.rs | 18 +- polkadot/node/core/pvf/common/Cargo.toml | 3 +- polkadot/node/core/pvf/common/src/lib.rs | 5 +- .../node/core/pvf/common/src/worker/mod.rs | 57 +- .../core/pvf/common/src/worker/security.rs | 512 ------------------ .../common/src/worker/security/landlock.rs | 325 +++++++++++ .../pvf/common/src/worker/security/mod.rs | 189 +++++++ .../pvf/common/src/worker/security/seccomp.rs | 201 +++++++ .../node/core/pvf/execute-worker/Cargo.toml | 1 - .../node/core/pvf/execute-worker/src/lib.rs | 4 +- .../node/core/pvf/prepare-worker/Cargo.toml | 1 - .../node/core/pvf/prepare-worker/src/lib.rs | 9 +- .../pvf/prepare-worker/src/memory_stats.rs | 2 +- .../node/core/pvf/src/execute/worker_intf.rs | 35 +- polkadot/node/core/pvf/src/host.rs | 126 +---- polkadot/node/core/pvf/src/lib.rs | 1 + .../node/core/pvf/src/prepare/worker_intf.rs | 37 +- polkadot/node/core/pvf/src/security.rs | 312 +++++++++++ polkadot/node/core/pvf/src/worker_intf.rs | 5 +- polkadot/node/core/pvf/tests/it/adder.rs | 17 +- polkadot/node/core/pvf/tests/it/main.rs | 128 ++++- .../src/node/utility/pvf-host-and-workers.md | 13 + .../list-syscalls/execute-worker-syscalls | 9 - .../list-syscalls/prepare-worker-syscalls | 9 - 27 files changed, 1376 insertions(+), 714 deletions(-) delete mode 100644 polkadot/node/core/pvf/common/src/worker/security.rs create mode 100644 polkadot/node/core/pvf/common/src/worker/security/landlock.rs create mode 100644 polkadot/node/core/pvf/common/src/worker/security/mod.rs create mode 100644 polkadot/node/core/pvf/common/src/worker/security/seccomp.rs create mode 100644 polkadot/node/core/pvf/src/security.rs diff --git a/Cargo.lock b/Cargo.lock index c95cc70b0da4..c368a957764e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5642,7 +5642,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" dependencies = [ - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -6596,7 +6596,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -7049,9 +7049,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libflate" @@ -7635,9 +7635,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lioness" @@ -12279,8 +12279,10 @@ dependencies = [ "polkadot-node-primitives", 
"polkadot-parachain-primitives", "polkadot-primitives", + "procfs", "rand 0.8.5", "rococo-runtime", + "rusty-fork", "slotmap", "sp-core", "sp-maybe-compressed-blob", @@ -12331,12 +12333,13 @@ dependencies = [ "sc-executor", "sc-executor-common", "sc-executor-wasmtime", + "seccompiler", "sp-core", "sp-externalities", "sp-io", "sp-tracing", "tempfile", - "tokio", + "thiserror", "tracing-gum", ] @@ -12354,7 +12357,6 @@ dependencies = [ "sp-core", "sp-maybe-compressed-blob", "sp-tracing", - "tokio", "tracing-gum", ] @@ -12377,7 +12379,6 @@ dependencies = [ "sp-maybe-compressed-blob", "sp-tracing", "tikv-jemalloc-ctl", - "tokio", "tracing-gum", ] @@ -13525,6 +13526,32 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "procfs" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" +dependencies = [ + "bitflags 2.4.0", + "chrono", + "flate2", + "hex", + "lazy_static", + "procfs-core", + "rustix 0.38.21", +] + +[[package]] +name = "procfs-core" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" +dependencies = [ + "bitflags 2.4.0", + "chrono", + "hex", +] + [[package]] name = "prometheus" version = "0.13.3" @@ -14452,14 +14479,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.8" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.10", "windows-sys 0.48.0", ] @@ -14556,6 +14583,7 @@ dependencies = [ "fnv", "quick-error", "tempfile", + "wait-timeout", ] [[package]] @@ -16182,6 +16210,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "seccompiler" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345a3e4dddf721a478089d4697b83c6c0a8f5bf16086f6c13397e4534eb6e2e5" +dependencies = [ + "libc", +] + [[package]] name = "secp256k1" version = "0.24.3" @@ -18410,7 +18447,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 21a7121d47bd..93db7d11cee8 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -149,7 +149,8 @@ async fn run( exec_worker_path, ), pvf_metrics, - ); + ) + .await; ctx.spawn_blocking("pvf-validation-host", task.boxed())?; loop { diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index bfd70c6fbd41..430f7cd5e8ef 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -39,6 +39,7 @@ polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = tr assert_matches = "1.4.0" criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support", "async_tokio"] } hex-literal = "0.4.1" + polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } # For benches and integration tests, depend on ourselves with the test-utils # feature. 
@@ -48,6 +49,10 @@ rococo-runtime = { path = "../../../runtime/rococo" } adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } +[target.'cfg(target_os = "linux")'.dev-dependencies] +procfs = "0.16.0" +rusty-fork = "0.3.0" + [[bench]] name = "host_prepare_rococo_runtime" harness = false diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index 3069fa2b194b..acd80526262c 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -17,18 +17,15 @@ //! Benchmarks for preparation through the host. We use a real PVF to get realistic results. use criterion::{criterion_group, criterion_main, BatchSize, Criterion, SamplingMode}; -use parity_scale_codec::Encode; use polkadot_node_core_pvf::{ start, testing, Config, Metrics, PrepareError, PrepareJobKind, PrepareStats, PvfPrepData, - ValidationError, ValidationHost, + ValidationHost, }; -use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::ExecutorParams; use rococo_runtime::WASM_BINARY; use std::time::Duration; use tokio::{runtime::Handle, sync::Mutex}; -const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); struct TestHost { @@ -36,7 +33,7 @@ struct TestHost { } impl TestHost { - fn new_with_config(handle: &Handle, f: F) -> Self + async fn new_with_config(handle: &Handle, f: F) -> Self where F: FnOnce(&mut Config), { @@ -50,7 +47,7 @@ impl TestHost { execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()); + let (host, task) = start(config, Metrics::default()).await; let _ = handle.spawn(task); Self { host: Mutex::new(host) } } @@ -107,15 +104,18 @@ fn host_prepare_rococo_runtime(c: &mut Criterion) { group.measurement_time(Duration::from_secs(240)); group.bench_function("host: prepare Rococo runtime", |b| { b.to_async(&rt).iter_batched( - || { + || async { ( TestHost::new_with_config(rt.handle(), |cfg| { cfg.prepare_workers_hard_max_num = 1; - }), + }) + .await, pvf.clone().code(), ) }, - |(host, pvf_code)| async move { + |result| async move { + let (host, pvf_code) = result.await; + // `PvfPrepData` is designed to be cheap to clone, so cloning shouldn't affect the // benchmark accuracy. 
let _stats = host.precheck_pvf(&pvf_code, Default::default()).await.unwrap(); diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 5fe2c6b6845c..7dc8d307026e 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -12,7 +12,6 @@ cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" -tokio = { version = "1.24.2", features = ["fs", "process", "io-util"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -30,6 +29,8 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" +seccompiler = "0.4.0" +thiserror = "1.0.31" [dev-dependencies] assert_matches = "1.4.0" diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 53c287ea9709..e2211b97d87b 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -32,10 +32,9 @@ pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; use std::{ - io::{Read, Write}, + io::{self, Read, Write}, mem, }; -use tokio::io; #[cfg(feature = "test-utils")] pub mod tests { @@ -50,6 +49,8 @@ pub mod tests { pub struct SecurityStatus { /// Whether the landlock features we use are fully available on this system. pub can_enable_landlock: bool, + /// Whether the seccomp features we use are fully available on this system. + pub can_enable_seccomp: bool, // Whether we are able to unshare the user namespace and change the filesystem root. pub can_unshare_user_namespace_and_change_root: bool, } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index e7b996ccdc3d..274a2fc80397 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -23,13 +23,12 @@ use cpu_time::ProcessTime; use futures::never::Never; use std::{ any::Any, - fmt, + fmt, io, os::unix::net::UnixStream, path::PathBuf, sync::mpsc::{Receiver, RecvTimeoutError}, time::Duration, }; -use tokio::{io, runtime::Runtime}; /// Use this macro to declare a `fn main() {}` that will create an executable that can be used for /// spawning the desired worker. @@ -85,6 +84,13 @@ macro_rules! decl_worker_main { let status = -1; std::process::exit(status) }, + "--check-can-enable-seccomp" => { + #[cfg(all(target_os = "linux", target_arch = "x86_64"))] + let status = if security::seccomp::check_is_fully_enabled() { 0 } else { -1 }; + #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] + let status = -1; + std::process::exit(status) + }, "--check-can-unshare-user-namespace-and-change-root" => { #[cfg(target_os = "linux")] let status = if let Err(err) = security::unshare_user_namespace_and_change_root( @@ -129,6 +135,7 @@ macro_rules! decl_worker_main { let mut worker_dir_path = None; let mut node_version = None; let mut can_enable_landlock = false; + let mut can_enable_seccomp = false; let mut can_unshare_user_namespace_and_change_root = false; let mut i = 2; @@ -147,6 +154,7 @@ macro_rules! decl_worker_main { i += 1 }, "--can-enable-landlock" => can_enable_landlock = true, + "--can-enable-seccomp" => can_enable_seccomp = true, "--can-unshare-user-namespace-and-change-root" => can_unshare_user_namespace_and_change_root = true, arg => panic!("Unexpected argument found: {}", arg), @@ -161,6 +169,7 @@ macro_rules! 
decl_worker_main { let worker_dir_path = std::path::Path::new(worker_dir_path).to_owned(); let security_status = $crate::SecurityStatus { can_enable_landlock, + can_enable_seccomp, can_unshare_user_namespace_and_change_root, }; @@ -198,7 +207,7 @@ impl fmt::Display for WorkerKind { // The worker version must be passed in so that we accurately get the version of the worker, and not // the version that this crate was compiled with. -pub fn worker_event_loop( +pub fn worker_event_loop( worker_kind: WorkerKind, socket_path: PathBuf, #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, @@ -207,8 +216,7 @@ pub fn worker_event_loop( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] security_status: &SecurityStatus, mut event_loop: F, ) where - F: FnMut(UnixStream, PathBuf) -> Fut, - Fut: futures::Future>, + F: FnMut(UnixStream, PathBuf) -> io::Result, { let worker_pid = std::process::id(); gum::debug!( @@ -262,7 +270,7 @@ pub fn worker_event_loop( } // Connect to the socket. - let stream = || -> std::io::Result { + let stream = || -> io::Result { let stream = UnixStream::connect(&socket_path)?; let _ = std::fs::remove_file(&socket_path); Ok(stream) @@ -317,6 +325,24 @@ pub fn worker_event_loop( let landlock_status = security::landlock::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); if !matches!(landlock_status, Ok(landlock::RulesetStatus::FullyEnforced)) { + // We previously were able to enable, so this should never happen. + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + "could not fully enable landlock: {:?}. This should not happen, please report an issue", + landlock_status + ); + } + } + + // TODO: We can enable the seccomp networking blacklist on aarch64 as well, but we need a CI + // job to catch regressions. See . + #[cfg(all(target_os = "linux", target_arch = "x86_64"))] + if security_status.can_enable_seccomp { + let seccomp_status = + security::seccomp::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); + if !matches!(seccomp_status, Ok(())) { // We previously were able to enable, so this should never happen. // // TODO: Make this a real error in secure-mode. See: @@ -325,8 +351,8 @@ pub fn worker_event_loop( target: LOG_TARGET, %worker_kind, %worker_pid, - "could not fully enable landlock: {:?}. This should not happen, please report to the Polkadot devs", - landlock_status + "could not fully enable seccomp: {:?}. This should not happen, please report an issue", + seccomp_status ); } } @@ -346,18 +372,11 @@ pub fn worker_event_loop( } // Run the main worker loop. - let rt = Runtime::new().expect("Creates tokio runtime. If this panics the worker will die and the host will detect that and deal with it."); - let err = rt - .block_on(event_loop(stream, worker_dir_path)) + let err = event_loop(stream, worker_dir_path) // It's never `Ok` because it's `Ok(Never)`. .unwrap_err(); worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); - - // We don't want tokio to wait for the tasks to finish. We want to bring down the worker as fast - // as possible and not wait for stalled validation to finish. This isn't strictly necessary now, - // but may be in the future. - rt.shutdown_background(); } /// Provide a consistent message on worker shutdown. @@ -438,7 +457,7 @@ fn kill_parent_node_in_emergency() { /// The motivation for this module is to coordinate worker threads without using async Rust. 
pub mod thread { use std::{ - panic, + io, panic, sync::{Arc, Condvar, Mutex}, thread, time::Duration, @@ -479,7 +498,7 @@ pub mod thread { f: F, cond: Cond, outcome: WaitOutcome, - ) -> std::io::Result> + ) -> io::Result> where F: FnOnce() -> R, F: Send + 'static + panic::UnwindSafe, @@ -497,7 +516,7 @@ pub mod thread { cond: Cond, outcome: WaitOutcome, stack_size: usize, - ) -> std::io::Result> + ) -> io::Result> where F: FnOnce() -> R, F: Send + 'static + panic::UnwindSafe, diff --git a/polkadot/node/core/pvf/common/src/worker/security.rs b/polkadot/node/core/pvf/common/src/worker/security.rs deleted file mode 100644 index 1b7614177448..000000000000 --- a/polkadot/node/core/pvf/common/src/worker/security.rs +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Functionality for securing workers. -//! -//! This is needed because workers are used to compile and execute untrusted code (PVFs). -//! -//! We currently employ the following security measures: -//! -//! - Restrict filesystem -//! - Use Landlock to remove all unnecessary FS access rights. -//! - Unshare the user and mount namespaces. -//! - Change the root directory to a worker-specific temporary directory. -//! - Remove env vars - -use crate::{worker::WorkerKind, LOG_TARGET}; - -/// Unshare the user namespace and change root to be the artifact directory. -/// -/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: -/// "CLONE_NEWUSER requires that the calling process is not threaded." -#[cfg(target_os = "linux")] -pub fn unshare_user_namespace_and_change_root( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &std::path::Path, -) -> Result<(), String> { - use std::{env, ffi::CString, os::unix::ffi::OsStrExt, path::Path, ptr}; - - // The following was copied from the `cstr_core` crate. - // - // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 - #[inline] - #[doc(hidden)] - const fn cstr_is_valid(bytes: &[u8]) -> bool { - if bytes.is_empty() || bytes[bytes.len() - 1] != 0 { - return false - } - - let mut index = 0; - while index < bytes.len() - 1 { - if bytes[index] == 0 { - return false - } - index += 1; - } - true - } - - macro_rules! cstr { - ($e:expr) => {{ - const STR: &[u8] = concat!($e, "\0").as_bytes(); - const STR_VALID: bool = cstr_is_valid(STR); - let _ = [(); 0 - (!(STR_VALID) as usize)]; - #[allow(unused_unsafe)] - unsafe { - core::ffi::CStr::from_bytes_with_nul_unchecked(STR) - } - }} - } - - gum::debug!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "unsharing the user namespace and calling pivot_root", - ); - - let worker_dir_path_c = CString::new(worker_dir_path.as_os_str().as_bytes()) - .expect("on unix; the path will never contain 0 bytes; qed"); - - // Wrapper around all the work to prevent repetitive error handling. 
- // - // # Errors - // - // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does - // not give the context of which call failed, so we return a &str error. - || -> Result<(), &'static str> { - // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps - // (2) and (3) are adapted from the example in pivot_root(2), with the additional - // change described in the `pivot_root(".", ".")` section. - unsafe { - // 1. `unshare` the user and the mount namespaces. - if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { - return Err("unshare user and mount namespaces") - } - - // 2. Setup mounts. - // - // Ensure that new root and its parent mount don't have shared propagation (which would - // cause pivot_root() to return an error), and prevent propagation of mount events to - // the initial mount namespace. - if libc::mount( - ptr::null(), - cstr!("/").as_ptr(), - ptr::null(), - libc::MS_REC | libc::MS_PRIVATE, - ptr::null(), - ) < 0 - { - return Err("mount MS_PRIVATE") - } - // Ensure that the new root is a mount point. - let additional_flags = - if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_kind { - libc::MS_RDONLY - } else { - 0 - }; - if libc::mount( - worker_dir_path_c.as_ptr(), - worker_dir_path_c.as_ptr(), - ptr::null(), // ignored when MS_BIND is used - libc::MS_BIND | - libc::MS_REC | libc::MS_NOEXEC | - libc::MS_NODEV | libc::MS_NOSUID | - libc::MS_NOATIME | additional_flags, - ptr::null(), // ignored when MS_BIND is used - ) < 0 - { - return Err("mount MS_BIND") - } - - // 3. `pivot_root` to the artifact directory. - if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { - return Err("chdir to worker dir path") - } - if libc::syscall(libc::SYS_pivot_root, cstr!(".").as_ptr(), cstr!(".").as_ptr()) < 0 { - return Err("pivot_root") - } - if libc::umount2(cstr!(".").as_ptr(), libc::MNT_DETACH) < 0 { - return Err("umount the old root mount point") - } - } - - Ok(()) - }() - .map_err(|err_ctx| { - let err = std::io::Error::last_os_error(); - format!("{}: {}", err_ctx, err) - })?; - - // Do some assertions. - if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { - return Err("expected current dir after pivot_root to be `/`".into()) - } - env::set_current_dir("..").map_err(|err| err.to_string())?; - if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { - return Err("expected not to be able to break out of new root by doing `..`".into()) - } - - Ok(()) -} - -/// Require env vars to have been removed when spawning the process, to prevent malicious code from -/// accessing them. -pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> bool { - let mut ok = true; - - for (key, value) in std::env::vars_os() { - // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of - // randomness for malicious code. In the future we can remove it also and log in the host; - // see . - if key == "RUST_LOG" { - continue - } - // An exception for MacOS. This is not a secure platform anyway, so we let it slide. - #[cfg(target_os = "macos")] - if key == "__CF_USER_TEXT_ENCODING" { - continue - } - - gum::error!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?key, - ?value, - "env var was present that should have been removed", - ); - - ok = false; - } - - ok -} - -/// The [landlock] docs say it best: -/// -/// > "Landlock is a security feature available since Linux 5.13. 
The goal is to enable to restrict -/// ambient rights (e.g., global filesystem access) for a set of processes by creating safe security -/// sandboxes as new security layers in addition to the existing system-wide access-controls. This -/// kind of sandbox is expected to help mitigate the security impact of bugs, unexpected or -/// malicious behaviors in applications. Landlock empowers any process, including unprivileged ones, -/// to securely restrict themselves." -/// -/// [landlock]: https://docs.rs/landlock/latest/landlock/index.html -#[cfg(target_os = "linux")] -pub mod landlock { - pub use landlock::RulesetStatus; - - use crate::{worker::WorkerKind, LOG_TARGET}; - use landlock::*; - use std::{ - fmt, - path::{Path, PathBuf}, - }; - - /// Landlock ABI version. We use ABI V1 because: - /// - /// 1. It is supported by our reference kernel version. - /// 2. Later versions do not (yet) provide additional security that would benefit us. - /// - /// # Versions (as of October 2023) - /// - /// - Polkadot reference kernel version: 5.16+ - /// - /// - ABI V1: kernel 5.13 - Introduces landlock, including full restrictions on file reads. - /// - /// - ABI V2: kernel 5.19 - Adds ability to prevent file renaming. Does not help us. During - /// execution an attacker can only affect the name of a symlinked artifact and not the - /// original one. - /// - /// - ABI V3: kernel 6.2 - Adds ability to prevent file truncation. During execution, can - /// prevent attackers from affecting a symlinked artifact. We don't strictly need this as we - /// plan to check for file integrity anyway; see - /// . - /// - /// # Determinism - /// - /// You may wonder whether we could always use the latest ABI instead of only the ABI supported - /// by the reference kernel version. It seems plausible, since landlock provides a best-effort - /// approach to enabling sandboxing. For example, if the reference version only supported V1 and - /// we were on V2, then landlock would use V2 if it was supported on the current machine, and - /// just fall back to V1 if not. - /// - /// The issue with this is indeterminacy. If half of validators were on V2 and half were on V1, - /// they may have different semantics on some PVFs. So a malicious PVF now has a new attack - /// vector: they can exploit this indeterminism between landlock ABIs! - /// - /// On the other hand we do want validators to be as secure as possible and protect their keys - /// from attackers. And, the risk with indeterminacy is low and there are other indeterminacy - /// vectors anyway. So we will only upgrade to a new ABI if either the reference kernel version - /// supports it or if it introduces some new feature that is beneficial to security. - pub const LANDLOCK_ABI: ABI = ABI::V1; - - #[derive(Debug)] - pub enum TryRestrictError { - InvalidExceptionPath(PathBuf), - RulesetError(RulesetError), - } - - impl From for TryRestrictError { - fn from(err: RulesetError) -> Self { - Self::RulesetError(err) - } - } - - impl fmt::Display for TryRestrictError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::InvalidExceptionPath(path) => write!(f, "invalid exception path: {:?}", path), - Self::RulesetError(err) => write!(f, "ruleset error: {}", err.to_string()), - } - } - } - - impl std::error::Error for TryRestrictError {} - - /// Try to enable landlock for the given kind of worker. 
- pub fn enable_for_worker( - worker_kind: WorkerKind, - worker_pid: u32, - worker_dir_path: &Path, - ) -> Result> { - let exceptions: Vec<(PathBuf, BitFlags)> = match worker_kind { - WorkerKind::Prepare => { - vec![(worker_dir_path.to_owned(), AccessFs::WriteFile.into())] - }, - WorkerKind::Execute => { - vec![(worker_dir_path.to_owned(), AccessFs::ReadFile.into())] - }, - WorkerKind::CheckPivotRoot => - panic!("this should only be passed for checking pivot_root; qed"), - }; - - gum::debug!( - target: LOG_TARGET, - %worker_kind, - %worker_pid, - ?worker_dir_path, - "enabling landlock with exceptions: {:?}", - exceptions, - ); - - Ok(try_restrict(exceptions)?) - } - - // TODO: - /// Runs a check for landlock and returns a single bool indicating whether the given landlock - /// ABI is fully enabled on the current Linux environment. - pub fn check_is_fully_enabled() -> bool { - let status_from_thread: Result> = - match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())) - .join() - { - Ok(Ok(status)) => Ok(status), - Ok(Err(ruleset_err)) => Err(ruleset_err.into()), - Err(_err) => Err("a panic occurred in try_restrict".into()), - }; - - matches!(status_from_thread, Ok(RulesetStatus::FullyEnforced)) - } - - /// Tries to restrict the current thread (should only be called in a process' main thread) with - /// the following landlock access controls: - /// - /// 1. all global filesystem access restricted, with optional exceptions - /// 2. ... more sandbox types (e.g. networking) may be supported in the future. - /// - /// If landlock is not supported in the current environment this is simply a noop. - /// - /// # Returns - /// - /// The status of the restriction (whether it was fully, partially, or not-at-all enforced). - fn try_restrict(fs_exceptions: I) -> Result - where - I: IntoIterator, - P: AsRef, - A: Into>, - { - let mut ruleset = - Ruleset::default().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?; - for (fs_path, access_bits) in fs_exceptions { - let paths = &[fs_path.as_ref().to_owned()]; - let mut rules = path_beneath_rules(paths, access_bits).peekable(); - if rules.peek().is_none() { - // `path_beneath_rules` silently ignores missing paths, so check for it manually. - return Err(TryRestrictError::InvalidExceptionPath(fs_path.as_ref().to_owned())) - } - ruleset = ruleset.add_rules(rules)?; - } - let status = ruleset.restrict_self()?; - Ok(status.ruleset) - } - - #[cfg(test)] - mod tests { - use super::*; - use std::{fs, io::ErrorKind, thread}; - - #[test] - fn restricted_thread_cannot_read_file() { - // TODO: This would be nice: . - if !check_is_fully_enabled() { - return - } - - // Restricted thread cannot read from FS. - let handle = - thread::spawn(|| { - // Create, write, and read two tmp files. This should succeed before any - // landlock restrictions are applied. - const TEXT: &str = "foo"; - let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); - let path1 = tmpfile1.path(); - let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); - let path2 = tmpfile2.path(); - - fs::write(path1, TEXT).unwrap(); - let s = fs::read_to_string(path1).unwrap(); - assert_eq!(s, TEXT); - fs::write(path2, TEXT).unwrap(); - let s = fs::read_to_string(path2).unwrap(); - assert_eq!(s, TEXT); - - // Apply Landlock with a read exception for only one of the files. 
- let status = try_restrict(vec![(path1, AccessFs::ReadFile)]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to read from both files, only tmpfile1 should succeed. - let result = fs::read_to_string(path1); - assert!(matches!( - result, - Ok(s) if s == TEXT - )); - let result = fs::read_to_string(path2); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - - // Apply Landlock for all files. - let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to read from tmpfile1 after landlock, it should fail. - let result = fs::read_to_string(path1); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - }); - - assert!(handle.join().is_ok()); - } - - #[test] - fn restricted_thread_cannot_write_file() { - // TODO: This would be nice: . - if !check_is_fully_enabled() { - return - } - - // Restricted thread cannot write to FS. - let handle = - thread::spawn(|| { - // Create and write two tmp files. This should succeed before any landlock - // restrictions are applied. - const TEXT: &str = "foo"; - let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); - let path1 = tmpfile1.path(); - let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); - let path2 = tmpfile2.path(); - - fs::write(path1, TEXT).unwrap(); - fs::write(path2, TEXT).unwrap(); - - // Apply Landlock with a write exception for only one of the files. - let status = try_restrict(vec![(path1, AccessFs::WriteFile)]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to write to both files, only tmpfile1 should succeed. - let result = fs::write(path1, TEXT); - assert!(matches!(result, Ok(_))); - let result = fs::write(path2, TEXT); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - - // Apply Landlock for all files. - let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to write to tmpfile1 after landlock, it should fail. - let result = fs::write(path1, TEXT); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - }); - - assert!(handle.join().is_ok()); - } - - // Test that checks whether landlock under our ABI version is able to truncate files. - #[test] - fn restricted_thread_can_truncate_file() { - // TODO: This would be nice: . - if !check_is_fully_enabled() { - return - } - - // Restricted thread can truncate file. - let handle = - thread::spawn(|| { - // Create and write a file. This should succeed before any landlock - // restrictions are applied. - const TEXT: &str = "foo"; - let tmpfile = tempfile::NamedTempFile::new().unwrap(); - let path = tmpfile.path(); - - fs::write(path, TEXT).unwrap(); - - // Apply Landlock with all exceptions under the current ABI. 
- let status = try_restrict(vec![(path, AccessFs::from_all(LANDLOCK_ABI))]); - if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { - panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); - } - - // Try to truncate the file. - let result = tmpfile.as_file().set_len(0); - assert!(result.is_ok()); - }); - - assert!(handle.join().is_ok()); - } - } -} diff --git a/polkadot/node/core/pvf/common/src/worker/security/landlock.rs b/polkadot/node/core/pvf/common/src/worker/security/landlock.rs new file mode 100644 index 000000000000..51500c733b8c --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker/security/landlock.rs @@ -0,0 +1,325 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The [landlock] docs say it best: +//! +//! > "Landlock is a security feature available since Linux 5.13. The goal is to enable to restrict +//! ambient rights (e.g., global filesystem access) for a set of processes by creating safe security +//! sandboxes as new security layers in addition to the existing system-wide access-controls. This +//! kind of sandbox is expected to help mitigate the security impact of bugs, unexpected or +//! malicious behaviors in applications. Landlock empowers any process, including unprivileged ones, +//! to securely restrict themselves." +//! +//! [landlock]: https://docs.rs/landlock/latest/landlock/index.html + +pub use landlock::RulesetStatus; + +use crate::{ + worker::{stringify_panic_payload, WorkerKind}, + LOG_TARGET, +}; +use landlock::*; +use std::path::{Path, PathBuf}; + +/// Landlock ABI version. We use ABI V1 because: +/// +/// 1. It is supported by our reference kernel version. +/// 2. Later versions do not (yet) provide additional security that would benefit us. +/// +/// # Versions (as of October 2023) +/// +/// - Polkadot reference kernel version: 5.16+ +/// +/// - ABI V1: kernel 5.13 - Introduces landlock, including full restrictions on file reads. +/// +/// - ABI V2: kernel 5.19 - Adds ability to prevent file renaming. Does not help us. During +/// execution an attacker can only affect the name of a symlinked artifact and not the original +/// one. +/// +/// - ABI V3: kernel 6.2 - Adds ability to prevent file truncation. During execution, can +/// prevent attackers from affecting a symlinked artifact. We don't strictly need this as we +/// plan to check for file integrity anyway; see +/// . +/// +/// # Determinism +/// +/// You may wonder whether we could always use the latest ABI instead of only the ABI supported +/// by the reference kernel version. It seems plausible, since landlock provides a best-effort +/// approach to enabling sandboxing. For example, if the reference version only supported V1 and +/// we were on V2, then landlock would use V2 if it was supported on the current machine, and +/// just fall back to V1 if not. +/// +/// The issue with this is indeterminacy. 
If half of validators were on V2 and half were on V1, +/// they may have different semantics on some PVFs. So a malicious PVF now has a new attack +/// vector: they can exploit this indeterminism between landlock ABIs! +/// +/// On the other hand we do want validators to be as secure as possible and protect their keys +/// from attackers. And, the risk with indeterminacy is low and there are other indeterminacy +/// vectors anyway. So we will only upgrade to a new ABI if either the reference kernel version +/// supports it or if it introduces some new feature that is beneficial to security. +pub const LANDLOCK_ABI: ABI = ABI::V1; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Invalid exception path: {0:?}")] + InvalidExceptionPath(PathBuf), + #[error(transparent)] + RulesetError(#[from] RulesetError), + #[error("A panic occurred in try_restrict: {0}")] + Panic(String), +} + +pub type Result = std::result::Result; + +/// Try to enable landlock for the given kind of worker. +pub fn enable_for_worker( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &Path, +) -> Result { + let exceptions: Vec<(PathBuf, BitFlags)> = match worker_kind { + WorkerKind::Prepare => { + vec![(worker_dir_path.to_owned(), AccessFs::WriteFile.into())] + }, + WorkerKind::Execute => { + vec![(worker_dir_path.to_owned(), AccessFs::ReadFile.into())] + }, + WorkerKind::CheckPivotRoot => + panic!("this should only be passed for checking pivot_root; qed"), + }; + + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "enabling landlock with exceptions: {:?}", + exceptions, + ); + + try_restrict(exceptions) +} + +// TODO: +/// Runs a check for landlock and returns a single bool indicating whether the given landlock +/// ABI is fully enabled on the current Linux environment. +pub fn check_is_fully_enabled() -> bool { + let status_from_thread: Result = + match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())).join() + { + Ok(Ok(status)) => Ok(status), + Ok(Err(ruleset_err)) => Err(ruleset_err.into()), + Err(err) => Err(Error::Panic(stringify_panic_payload(err))), + }; + + matches!(status_from_thread, Ok(RulesetStatus::FullyEnforced)) +} + +/// Tries to restrict the current thread (should only be called in a process' main thread) with +/// the following landlock access controls: +/// +/// 1. all global filesystem access restricted, with optional exceptions +/// 2. ... more sandbox types (e.g. networking) may be supported in the future. +/// +/// If landlock is not supported in the current environment this is simply a noop. +/// +/// # Returns +/// +/// The status of the restriction (whether it was fully, partially, or not-at-all enforced). +fn try_restrict(fs_exceptions: I) -> Result +where + I: IntoIterator, + P: AsRef, + A: Into>, +{ + let mut ruleset = + Ruleset::default().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?; + for (fs_path, access_bits) in fs_exceptions { + let paths = &[fs_path.as_ref().to_owned()]; + let mut rules = path_beneath_rules(paths, access_bits).peekable(); + if rules.peek().is_none() { + // `path_beneath_rules` silently ignores missing paths, so check for it manually. 
+ return Err(Error::InvalidExceptionPath(fs_path.as_ref().to_owned())) + } + ruleset = ruleset.add_rules(rules)?; + } + let status = ruleset.restrict_self()?; + Ok(status.ruleset) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{fs, io::ErrorKind, thread}; + + #[test] + fn restricted_thread_cannot_read_file() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + // Restricted thread cannot read from FS. + let handle = thread::spawn(|| { + // Create, write, and read two tmp files. This should succeed before any + // landlock restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); + let path1 = tmpfile1.path(); + let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); + let path2 = tmpfile2.path(); + + fs::write(path1, TEXT).unwrap(); + let s = fs::read_to_string(path1).unwrap(); + assert_eq!(s, TEXT); + fs::write(path2, TEXT).unwrap(); + let s = fs::read_to_string(path2).unwrap(); + assert_eq!(s, TEXT); + + // Apply Landlock with a read exception for only one of the files. + let status = try_restrict(vec![(path1, AccessFs::ReadFile)]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to read from both files, only tmpfile1 should succeed. + let result = fs::read_to_string(path1); + assert!(matches!( + result, + Ok(s) if s == TEXT + )); + let result = fs::read_to_string(path2); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Apply Landlock for all files. + let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to read from tmpfile1 after landlock, it should fail. + let result = fs::read_to_string(path1); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + }); + + assert!(handle.join().is_ok()); + } + + #[test] + fn restricted_thread_cannot_write_file() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + // Restricted thread cannot write to FS. + let handle = thread::spawn(|| { + // Create and write two tmp files. This should succeed before any landlock + // restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); + let path1 = tmpfile1.path(); + let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); + let path2 = tmpfile2.path(); + + fs::write(path1, TEXT).unwrap(); + fs::write(path2, TEXT).unwrap(); + + // Apply Landlock with a write exception for only one of the files. + let status = try_restrict(vec![(path1, AccessFs::WriteFile)]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to write to both files, only tmpfile1 should succeed. + let result = fs::write(path1, TEXT); + assert!(matches!(result, Ok(_))); + let result = fs::write(path2, TEXT); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Apply Landlock for all files. 
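+ // Landlock rulesets stack: this second, stricter ruleset composes with the first one,
+ // so the read exception for tmpfile1 is now gone for good.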
+ let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to write to tmpfile1 after landlock, it should fail. + let result = fs::write(path1, TEXT); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + }); + + assert!(handle.join().is_ok()); + } + + // Test that checks whether landlock under our ABI version is able to truncate files. + #[test] + fn restricted_thread_can_truncate_file() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + // Restricted thread can truncate file. + let handle = thread::spawn(|| { + // Create and write a file. This should succeed before any landlock + // restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile = tempfile::NamedTempFile::new().unwrap(); + let path = tmpfile.path(); + + fs::write(path, TEXT).unwrap(); + + // Apply Landlock with all exceptions under the current ABI. + let status = try_restrict(vec![(path, AccessFs::from_all(LANDLOCK_ABI))]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!( + "Ruleset should be enforced since we checked if landlock is enabled: {:?}", + status + ); + } + + // Try to truncate the file. + let result = tmpfile.as_file().set_len(0); + assert!(result.is_ok()); + }); + + assert!(handle.join().is_ok()); + } +} diff --git a/polkadot/node/core/pvf/common/src/worker/security/mod.rs b/polkadot/node/core/pvf/common/src/worker/security/mod.rs new file mode 100644 index 000000000000..9a38ed172773 --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker/security/mod.rs @@ -0,0 +1,189 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Functionality for securing workers. +//! +//! This is needed because workers are used to compile and execute untrusted code (PVFs). +//! +//! We currently employ the following security measures: +//! +//! - Restrict filesystem +//! - Use Landlock to remove all unnecessary FS access rights. +//! - Unshare the user and mount namespaces. +//! - Change the root directory to a worker-specific temporary directory. +//! - Restrict networking by blocking socket creation and io_uring. +//! - Remove env vars + +use crate::{worker::WorkerKind, LOG_TARGET}; + +#[cfg(target_os = "linux")] +pub mod landlock; + +#[cfg(all(target_os = "linux", target_arch = "x86_64"))] +pub mod seccomp; + +/// Unshare the user namespace and change root to be the artifact directory. +/// +/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: +/// "CLONE_NEWUSER requires that the calling process is not threaded." 
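+///
+/// In short: the worker dir is bind-mounted onto itself (read-only for execute workers), the
+/// process `pivot_root`s into it, and the old root is unmounted, leaving the worker with no
+/// view of the host filesystem.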
+#[cfg(target_os = "linux")] +pub fn unshare_user_namespace_and_change_root( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &std::path::Path, +) -> Result<(), String> { + use std::{env, ffi::CString, os::unix::ffi::OsStrExt, path::Path, ptr}; + + // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 + macro_rules! cstr_ptr { + ($e:expr) => { + concat!($e, "\0").as_ptr().cast::() + }; + } + + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "unsharing the user namespace and calling pivot_root", + ); + + let worker_dir_path_c = CString::new(worker_dir_path.as_os_str().as_bytes()) + .expect("on unix; the path will never contain 0 bytes; qed"); + + // Wrapper around all the work to prevent repetitive error handling. + // + // # Errors + // + // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does + // not give the context of which call failed, so we return a &str error. + || -> Result<(), &'static str> { + // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps + // (2) and (3) are adapted from the example in pivot_root(2), with the additional + // change described in the `pivot_root(".", ".")` section. + unsafe { + // 1. `unshare` the user and the mount namespaces. + if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { + return Err("unshare user and mount namespaces") + } + + // 2. Setup mounts. + // + // Ensure that new root and its parent mount don't have shared propagation (which would + // cause pivot_root() to return an error), and prevent propagation of mount events to + // the initial mount namespace. + if libc::mount( + ptr::null(), + cstr_ptr!("/"), + ptr::null(), + libc::MS_REC | libc::MS_PRIVATE, + ptr::null(), + ) < 0 + { + return Err("mount MS_PRIVATE") + } + // Ensure that the new root is a mount point. + let additional_flags = + if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_kind { + libc::MS_RDONLY + } else { + 0 + }; + if libc::mount( + worker_dir_path_c.as_ptr(), + worker_dir_path_c.as_ptr(), + ptr::null(), // ignored when MS_BIND is used + libc::MS_BIND | + libc::MS_REC | libc::MS_NOEXEC | + libc::MS_NODEV | libc::MS_NOSUID | + libc::MS_NOATIME | additional_flags, + ptr::null(), // ignored when MS_BIND is used + ) < 0 + { + return Err("mount MS_BIND") + } + + // 3. `pivot_root` to the artifact directory. + if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { + return Err("chdir to worker dir path") + } + if libc::syscall(libc::SYS_pivot_root, cstr_ptr!("."), cstr_ptr!(".")) < 0 { + return Err("pivot_root") + } + if libc::umount2(cstr_ptr!("."), libc::MNT_DETACH) < 0 { + return Err("umount the old root mount point") + } + } + + Ok(()) + }() + .map_err(|err_ctx| { + let err = std::io::Error::last_os_error(); + format!("{}: {}", err_ctx, err) + })?; + + // Do some assertions. + if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { + return Err("expected current dir after pivot_root to be `/`".into()) + } + env::set_current_dir("..").map_err(|err| err.to_string())?; + if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { + return Err("expected not to be able to break out of new root by doing `..`".into()) + } + + Ok(()) +} + +/// Require env vars to have been removed when spawning the process, to prevent malicious code from +/// accessing them. 
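+///
+/// Returns `true` if the environment is clean apart from the allowed exceptions; an error is
+/// logged for every unexpected variable that is still present.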
+pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> bool {
+    gum::trace!(
+        target: LOG_TARGET,
+        %worker_kind,
+        %worker_pid,
+        "checking env vars in worker",
+    );
+
+    let mut ok = true;
+
+    for (key, value) in std::env::vars_os() {
+        // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of
+        // randomness for malicious code. In the future we can remove it also and log in the host;
+        // see .
+        if key == "RUST_LOG" {
+            continue
+        }
+        // An exception for macOS. This is not a secure platform anyway, so we let it slide.
+        #[cfg(target_os = "macos")]
+        if key == "__CF_USER_TEXT_ENCODING" {
+            continue
+        }
+
+        gum::error!(
+            target: LOG_TARGET,
+            %worker_kind,
+            %worker_pid,
+            ?key,
+            ?value,
+            "env var was present that should have been removed",
+        );
+
+        ok = false;
+    }
+
+    ok
+}
diff --git a/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs
new file mode 100644
index 000000000000..5539ad284400
--- /dev/null
+++ b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs
@@ -0,0 +1,201 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Functionality for sandboxing workers by restricting their capabilities by blocking certain
+//! syscalls with seccomp.
+//!
+//! For security we block the following:
+//!
+//! - creation of new sockets - these are unneeded in PVF jobs, and we can safely block them without
+//! affecting consensus.
+//!
+//! - `io_uring` - allows for networking and needs to be blocked. See below for a discussion on the
+//! safety of doing this.
+//!
+//! # Safety of blocking io_uring
+//!
+//! `io_uring` is just a way of issuing system calls in an async manner, and there is nothing
+//! stopping wasmtime from legitimately using it. Fortunately, at the moment it does not. Generally,
+//! not many applications use `io_uring` in production yet, because of the numerous kernel CVEs
+//! discovered. It's still under a lot of development. Android outright banned `io_uring` for these
+//! reasons.
+//!
+//! Considering `io_uring`'s status discussed above, and that it very likely would get detected
+//! either by our [static analysis](https://github.com/paritytech/polkadot-sdk/pull/1663) or by
+//! testing, we think it is safe to block it.
+//!
+//! ## Consensus analysis
+//!
+//! If execution hits an edge case code path unique to a given machine, it's already taken a
+//! non-deterministic branch anyway. After all, we just care that the majority of validators reach
+//! the same result and preserve consensus. So worst-case scenario, there's a dispute, and we can
+//! always admit fault and refund the wrong validator. On the other hand, if all validators take the
+//! code path that results in a seccomp violation, then they would all vote against the current
+//! candidate, which is also fine.
The violation would get logged (in big scary letters) and +//! hopefully some validator reports it to us. +//! +//! Actually, a worst-worse-case scenario is that 50% of validators vote against, so that there is +//! no consensus. But so many things would have to go wrong for that to happen: +//! +//! 1. An update to `wasmtime` is introduced that uses io_uring (unlikely as io_uring is mainly for +//! IO-heavy applications) +//! +//! 2. The new syscall is not detected by our static analysis +//! +//! 3. It is never triggered in any of our tests +//! +//! 4. It then gets triggered on some super edge case in production on 50% of validators causing a +//! stall (bad but very unlikely) +//! +//! 5. Or, it triggers on only a few validators causing a dispute (more likely but not as bad) +//! +//! Considering how many things would have to go wrong here, we believe it's safe to block +//! `io_uring`. +//! +//! # Action on syscall violations +//! +//! On syscall violations we currently only log, to make sure this works correctly before enforcing. +//! +//! In the future, when a forbidden syscall is attempted we immediately kill the process in order to +//! prevent the attacker from doing anything else. In execution, this will result in voting against +//! the candidate. + +use crate::{ + worker::{stringify_panic_payload, WorkerKind}, + LOG_TARGET, +}; +use seccompiler::*; +use std::{collections::BTreeMap, path::Path}; + +/// The action to take on caught syscalls. +#[cfg(not(test))] +const CAUGHT_ACTION: SeccompAction = SeccompAction::Log; +/// Don't kill the process when testing. +#[cfg(test)] +const CAUGHT_ACTION: SeccompAction = SeccompAction::Errno(libc::EACCES as u32); + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + Seccomp(#[from] seccompiler::Error), + #[error(transparent)] + Backend(#[from] seccompiler::BackendError), + #[error("A panic occurred in try_restrict: {0}")] + Panic(String), +} + +pub type Result = std::result::Result; + +/// Try to enable seccomp for the given kind of worker. +pub fn enable_for_worker( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &Path, +) -> Result<()> { + gum::trace!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "enabling seccomp", + ); + + try_restrict() +} + +/// Runs a check for seccomp and returns a single bool indicating whether seccomp with our rules is +/// fully enabled on the current Linux environment. +pub fn check_is_fully_enabled() -> bool { + let status_from_thread: Result<()> = match std::thread::spawn(|| try_restrict()).join() { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err.into()), + Err(err) => Err(Error::Panic(stringify_panic_payload(err))), + }; + + matches!(status_from_thread, Ok(())) +} + +/// Applies a `seccomp` filter to disable networking for the PVF threads. +pub fn try_restrict() -> Result<()> { + // Build a `seccomp` filter which by default allows all syscalls except those blocked in the + // blacklist. + let mut blacklisted_rules = BTreeMap::default(); + + // Restrict the creation of sockets. + blacklisted_rules.insert(libc::SYS_socketpair, vec![]); + blacklisted_rules.insert(libc::SYS_socket, vec![]); + + // Prevent connecting to sockets for extra safety. + blacklisted_rules.insert(libc::SYS_connect, vec![]); + + // Restrict io_uring. 
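+ // Blocking `io_uring_setup` alone would already be enough, since without it no ring can
+ // be created, but we block all three entry points for good measure.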
+ blacklisted_rules.insert(libc::SYS_io_uring_setup, vec![]); + blacklisted_rules.insert(libc::SYS_io_uring_enter, vec![]); + blacklisted_rules.insert(libc::SYS_io_uring_register, vec![]); + + let filter = SeccompFilter::new( + blacklisted_rules, + // Mismatch action: what to do if not in rule list. + SeccompAction::Allow, + // Match action: what to do if in rule list. + CAUGHT_ACTION, + TargetArch::x86_64, + )?; + + let bpf_prog: BpfProgram = filter.try_into()?; + + // Applies filter (runs seccomp) to the calling thread. + seccompiler::apply_filter(&bpf_prog)?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{io::ErrorKind, net::TcpListener, thread}; + + #[test] + fn sandboxed_thread_cannot_use_sockets() { + // TODO: This would be nice: . + if !check_is_fully_enabled() { + return + } + + let handle = thread::spawn(|| { + // Open a socket, this should succeed before seccomp is applied. + TcpListener::bind("127.0.0.1:0").unwrap(); + + let status = try_restrict(); + if !matches!(status, Ok(())) { + panic!("Ruleset should be enforced since we checked if seccomp is enabled"); + } + + // Try to open a socket after seccomp. + assert!(matches!( + TcpListener::bind("127.0.0.1:0"), + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Other syscalls should still work. + unsafe { + assert!(libc::getppid() > 0); + } + }); + + assert!(handle.join().is_ok()); + } +} diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 23678d95696e..203bbd0e7859 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -11,7 +11,6 @@ cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } rayon = "1.5.1" -tokio = { version = "1.24.2", features = ["fs", "process"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index af73eb16e685..8872f9bc8dd3 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -39,12 +39,12 @@ use polkadot_node_core_pvf_common::{ use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::{executor_params::DEFAULT_NATIVE_STACK_MAX, ExecutorParams}; use std::{ + io, os::unix::net::UnixStream, path::PathBuf, sync::{mpsc::channel, Arc}, time::Duration, }; -use tokio::io; // Wasmtime powers the Substrate Executor. It compiles the wasm bytecode into native code. 
// That native code does not create any stacks and just reuses the stack of the thread that @@ -138,7 +138,7 @@ pub fn worker_entrypoint( node_version, worker_version, &security_status, - |mut stream, worker_dir_path| async move { + |mut stream, worker_dir_path| { let worker_pid = std::process::id(); let artifact_path = worker_dir::execute_artifact(&worker_dir_path); diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 886209b78c32..eb53ebdc941b 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -13,7 +13,6 @@ gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" rayon = "1.5.1" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } -tokio = { version = "1.24.2", features = ["fs", "process"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index fa5d3656a35e..926d9cabe182 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -45,12 +45,12 @@ use polkadot_node_core_pvf_common::{ }; use polkadot_primitives::ExecutorParams; use std::{ + fs, io, os::unix::net::UnixStream, path::PathBuf, sync::{mpsc::channel, Arc}, time::Duration, }; -use tokio::io; /// Contains the bytes for a successfully compiled artifact. pub struct CompiledArtifact(Vec); @@ -131,7 +131,7 @@ pub fn worker_entrypoint( node_version, worker_version, &security_status, - |mut stream, worker_dir_path| async move { + |mut stream, worker_dir_path| { let worker_pid = std::process::id(); let temp_artifact_dest = worker_dir::prepare_tmp_artifact(&worker_dir_path); @@ -229,8 +229,7 @@ pub fn worker_entrypoint( // Stop the memory stats worker and get its observed memory stats. #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, worker_pid) - .await; + let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, worker_pid); let memory_stats = MemoryStats { #[cfg(any( target_os = "linux", @@ -255,7 +254,7 @@ pub fn worker_entrypoint( "worker: writing artifact to {}", temp_artifact_dest.display(), ); - tokio::fs::write(&temp_artifact_dest, &artifact).await?; + fs::write(&temp_artifact_dest, &artifact)?; Ok(PrepareStats { cpu_time_elapsed, memory_stats }) }, diff --git a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs index c70ff56fc84d..5f577b0901c2 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -122,7 +122,7 @@ pub mod memory_tracker { } /// Helper function to get the stats from the memory tracker. Helps isolate this error handling. 
- pub async fn get_memory_tracker_loop_stats( + pub fn get_memory_tracker_loop_stats( thread: JoinHandle>, worker_pid: u32, ) -> Option { diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index 783c7c7abbc8..61264f7d517d 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -18,6 +18,7 @@ use crate::{ artifacts::ArtifactPathId, + security, worker_intf::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -106,7 +107,7 @@ pub enum Outcome { /// returns the outcome. /// /// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being -/// killed. +/// killed, if it's still alive. pub async fn start_work( worker: IdleWorker, artifact: ArtifactPathId, @@ -124,7 +125,10 @@ pub async fn start_work( artifact.path.display(), ); + let artifact_path = artifact.path.clone(); with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { + let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; + if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await { gum::warn!( target: LOG_TARGET, @@ -153,9 +157,38 @@ pub async fn start_work( ?error, "failed to recv an execute response", ); + // The worker died. Check if it was due to a seccomp violation. + // + // NOTE: Log, but don't change the outcome. Not all validators may have + // auditing enabled, so we don't want attackers to abuse a non-deterministic + // outcome. + for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { + gum::error!( + target: LOG_TARGET, + worker_pid = %pid, + %syscall, + validation_code_hash = ?artifact.id.code_hash, + ?artifact_path, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + return Outcome::IoErr }, Ok(response) => { + // Check if any syscall violations occurred during the job. For now this is + // only informative, as we are not enforcing the seccomp policy yet. + for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { + gum::error!( + target: LOG_TARGET, + worker_pid = %pid, + %syscall, + validation_code_hash = ?artifact.id.code_hash, + ?artifact_path, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + if let Response::Ok{duration, ..} = response { if duration > execution_timeout { // The job didn't complete within the timeout. diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 6c9606bb2f3c..dd0bd8581985 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -24,12 +24,12 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, execute::{self, PendingExecutionRequest}, metrics::Metrics, - prepare, Priority, ValidationError, LOG_TARGET, + prepare, security, Priority, ValidationError, LOG_TARGET, }; use always_assert::never; use futures::{ channel::{mpsc, oneshot}, - Future, FutureExt, SinkExt, StreamExt, + join, Future, FutureExt, SinkExt, StreamExt, }; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, @@ -153,6 +153,7 @@ pub struct Config { pub cache_path: PathBuf, /// The version of the node. 
`None` can be passed to skip the version check (only for tests). pub node_version: Option, + /// The path to the program that can be used to spawn the prepare workers. pub prepare_worker_program_path: PathBuf, /// The time allotted for a prepare worker to spawn and report to the host. @@ -162,6 +163,7 @@ pub struct Config { pub prepare_workers_soft_max_num: usize, /// The absolute number of workers that can be spawned in the prepare pool. pub prepare_workers_hard_max_num: usize, + /// The path to the program that can be used to spawn the execute workers. pub execute_worker_program_path: PathBuf, /// The time allotted for an execute worker to spawn and report to the host. @@ -181,10 +183,12 @@ impl Config { Self { cache_path, node_version, + prepare_worker_program_path, prepare_worker_spawn_timeout: Duration::from_secs(3), prepare_workers_soft_max_num: 1, prepare_workers_hard_max_num: 1, + execute_worker_program_path, execute_worker_spawn_timeout: Duration::from_secs(3), execute_workers_max_num: 2, @@ -200,15 +204,24 @@ impl Config { /// The future should not return normally but if it does then that indicates an unrecoverable error. /// In that case all pending requests will be canceled, dropping the result senders and new ones /// will be rejected. -pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future) { +pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future) { gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); // Run checks for supported security features once per host startup. Warn here if not enabled. let security_status = { - let can_enable_landlock = check_landlock(&config.prepare_worker_program_path); - let can_unshare_user_namespace_and_change_root = - check_can_unshare_user_namespace_and_change_root(&config.prepare_worker_program_path); - SecurityStatus { can_enable_landlock, can_unshare_user_namespace_and_change_root } + // TODO: add check that syslog is available and that seccomp violations are logged? + let (can_enable_landlock, can_enable_seccomp, can_unshare_user_namespace_and_change_root) = join!( + security::check_landlock(&config.prepare_worker_program_path), + security::check_seccomp(&config.prepare_worker_program_path), + security::check_can_unshare_user_namespace_and_change_root( + &config.prepare_worker_program_path + ) + ); + SecurityStatus { + can_enable_landlock, + can_enable_seccomp, + can_unshare_user_namespace_and_change_root, + } }; let (to_host_tx, to_host_rx) = mpsc::channel(10); @@ -882,105 +895,6 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream .map(|_| ()) } -/// Check if we can sandbox the root and emit a warning if not. -/// -/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible -/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on -/// success and -1 on failure. -fn check_can_unshare_user_namespace_and_change_root( - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] - prepare_worker_program_path: &Path, -) -> bool { - cfg_if::cfg_if! 
{ - if #[cfg(target_os = "linux")] { - let output = std::process::Command::new(prepare_worker_program_path) - .arg("--check-can-unshare-user-namespace-and-change-root") - .output(); - - match output { - Ok(output) if output.status.success() => true, - Ok(output) => { - let stderr = std::str::from_utf8(&output.stderr) - .expect("child process writes a UTF-8 string to stderr; qed") - .trim(); - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - // Docs say to always print status using `Display` implementation. - status = %output.status, - %stderr, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with support for unsharing user namespaces for maximum security." - ); - false - } - } -} - -/// Check if landlock is supported and emit a warning if not. -/// -/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible -/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on -/// success and -1 on failure. -fn check_landlock( - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] - prepare_worker_program_path: &Path, -) -> bool { - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - match std::process::Command::new(prepare_worker_program_path) - .arg("--check-can-enable-landlock") - .status() - { - Ok(status) if status.success() => true, - Ok(status) => { - let abi = - polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - %abi, - "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." 
- ); - false - } - } -} - #[cfg(test)] pub(crate) mod tests { use super::*; diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 27630af40c2f..102a91dbdad7 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -97,6 +97,7 @@ mod host; mod metrics; mod prepare; mod priority; +mod security; mod worker_intf; #[cfg(feature = "test-utils")] diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index b66c36044343..1f6ab0b76556 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -18,6 +18,7 @@ use crate::{ metrics::Metrics, + security, worker_intf::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -126,7 +127,9 @@ pub async fn start_work( pid, |tmp_artifact_file, mut stream, worker_dir| async move { let preparation_timeout = pvf.prep_timeout(); - if let Err(err) = send_request(&mut stream, pvf).await { + let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; + + if let Err(err) = send_request(&mut stream, pvf.clone()).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -150,7 +153,19 @@ pub async fn start_work( match result { // Received bytes from worker within the time limit. - Ok(Ok(prepare_result)) => + Ok(Ok(prepare_result)) => { + // Check if any syscall violations occurred during the job. For now this is only + // informative, as we are not enforcing the seccomp policy yet. + for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { + gum::error!( + target: LOG_TARGET, + worker_pid = %pid, + %syscall, + ?pvf, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + handle_response( metrics, IdleWorker { stream, pid, worker_dir }, @@ -160,7 +175,8 @@ pub async fn start_work( artifact_path, preparation_timeout, ) - .await, + .await + }, Ok(Err(err)) => { // Communication error within the time limit. gum::warn!( @@ -169,6 +185,21 @@ pub async fn start_work( "failed to recv a prepare response: {:?}", err, ); + + // The worker died. Check if it was due to a seccomp violation. + // + // NOTE: Log, but don't change the outcome. Not all validators may have auditing + // enabled, so we don't want attackers to abuse a non-deterministic outcome. + for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { + gum::error!( + target: LOG_TARGET, + worker_pid = %pid, + %syscall, + ?pvf, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + Outcome::IoErr(err.to_string()) }, Err(_) => { diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs new file mode 100644 index 000000000000..decd321e415e --- /dev/null +++ b/polkadot/node/core/pvf/src/security.rs @@ -0,0 +1,312 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::LOG_TARGET; +use std::path::Path; +use tokio::{ + fs::{File, OpenOptions}, + io::{AsyncReadExt, AsyncSeekExt, SeekFrom}, +}; + +/// Check if we can sandbox the root and emit a warning if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +pub async fn check_can_unshare_user_namespace_and_change_root( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> bool { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-unshare-user-namespace-and-change-root") + .output() + .await + { + Ok(output) if output.status.success() => true, + Ok(output) => { + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + // Docs say to always print status using `Display` implementation. + status = %output.status, + %stderr, + "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." + ); + false + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + "Could not start child process: {}", + err + ); + false + }, + } + } else { + gum::warn!( + target: LOG_TARGET, + "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with support for unsharing user namespaces for maximum security." + ); + false + } + } +} + +/// Check if landlock is supported and emit a warning if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +pub async fn check_landlock( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> bool { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-landlock") + .status() + .await + { + Ok(status) if status.success() => true, + Ok(status) => { + let abi = + polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + ?status, + %abi, + "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." 
+ ); + false + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + "Could not start child process: {}", + err + ); + false + }, + } + } else { + gum::warn!( + target: LOG_TARGET, + "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." + ); + false + } + } +} + +/// Check if seccomp is supported and emit a warning if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +pub async fn check_seccomp( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> bool { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-seccomp") + .status() + .await + { + Ok(status) if status.success() => true, + Ok(status) => { + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + ?status, + "Cannot fully enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." + ); + false + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + "Could not start child process: {}", + err + ); + false + }, + } + } else { + gum::warn!( + target: LOG_TARGET, + "Cannot enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with seccomp support for maximum security." + ); + false + } + } +} + +const AUDIT_LOG_PATH: &'static str = "/var/log/audit/audit.log"; +const SYSLOG_PATH: &'static str = "/var/log/syslog"; + +/// System audit log. +pub struct AuditLogFile { + file: File, + path: &'static str, +} + +impl AuditLogFile { + /// Looks for an audit log file on the system and opens it, seeking to the end to skip any + /// events from before this was called. + /// + /// A bit of a verbose name, but it should clue future refactorers not to move calls closer to + /// where the `AuditLogFile` is used. + pub async fn try_open_and_seek_to_end() -> Option { + let mut path = AUDIT_LOG_PATH; + let mut file = match OpenOptions::new().read(true).open(AUDIT_LOG_PATH).await { + Ok(file) => Ok(file), + Err(_) => { + path = SYSLOG_PATH; + OpenOptions::new().read(true).open(SYSLOG_PATH).await + }, + } + .ok()?; + + let _pos = file.seek(SeekFrom::End(0)).await; + + Some(Self { file, path }) + } + + async fn read_new_since_open(mut self) -> String { + let mut buf = String::new(); + let _len = self.file.read_to_string(&mut buf).await; + buf + } +} + +/// Check if a seccomp violation occurred for the given worker. As the syslog may be in a different +/// location, or seccomp auditing may be disabled, this function provides a best-effort attempt +/// only. +/// +/// The `audit_log_file` must have been obtained before the job started. It only allows reading +/// entries that were written since it was obtained, so that we do not consider events from previous +/// processes with the same pid. This can still be racy, but it's unlikely and fine for a +/// best-effort attempt. 
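+///
+/// Returns the syscall numbers of any violations found (e.g. `53` for `socketpair` on x86-64).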
+pub async fn check_seccomp_violations_for_worker(
+    audit_log_file: Option<AuditLogFile>,
+    worker_pid: u32,
+) -> Vec<u32> {
+    let audit_event_pid_field = format!("pid={worker_pid}");
+
+    let audit_log_file = match audit_log_file {
+        Some(file) => {
+            gum::debug!(
+                target: LOG_TARGET,
+                %worker_pid,
+                audit_log_path = ?file.path,
+                "checking audit log for seccomp violations",
+            );
+            file
+        },
+        None => {
+            gum::warn!(
+                target: LOG_TARGET,
+                %worker_pid,
+                "could not open either {AUDIT_LOG_PATH} or {SYSLOG_PATH} for reading audit logs"
+            );
+            return vec![]
+        },
+    };
+    let events = audit_log_file.read_new_since_open().await;
+
+    let mut violations = vec![];
+    for event in events.lines() {
+        if let Some(syscall) = parse_audit_log_for_seccomp_event(event, &audit_event_pid_field) {
+            violations.push(syscall);
+        }
+    }
+
+    violations
+}
+
+fn parse_audit_log_for_seccomp_event(event: &str, audit_event_pid_field: &str) -> Option<u32> {
+    const SECCOMP_AUDIT_EVENT_TYPE: &'static str = "type=1326";
+
+    // Do a series of simple .contains instead of a regex, because I'm not sure if the fields are
+    // guaranteed to always be in the same order.
+    if !event.contains(SECCOMP_AUDIT_EVENT_TYPE) || !event.contains(&audit_event_pid_field) {
+        return None
+    }
+
+    // Get the syscall. Let's avoid a dependency on regex just for this.
+    for field in event.split(" ") {
+        if let Some(syscall) = field.strip_prefix("syscall=") {
+            return syscall.parse::<u32>().ok()
+        }
+    }
+
+    None
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_audit_log_for_seccomp_event() {
+        let audit_event_pid_field = "pid=2559058";
+
+        assert_eq!(
+            parse_audit_log_for_seccomp_event(
+                r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559058 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#,
+                audit_event_pid_field
+            ),
+            Some(53)
+        );
+
+        // pid is wrong
+        assert_eq!(
+            parse_audit_log_for_seccomp_event(
+                r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#,
+                audit_event_pid_field
+            ),
+            None
+        );
+
+        // type is wrong (the pid matches, so only the event type differs)
+        assert_eq!(
+            parse_audit_log_for_seccomp_event(
+                r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559058 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#,
+                audit_event_pid_field
+            ),
+            None
+        );
+
+        // no syscall field (type and pid match, so only the missing field is exercised)
+        assert_eq!(
+            parse_audit_log_for_seccomp_event(
+                r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559058 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e compat=0 ip=0x7f7542c80d5e code=0x80000000"#,
+                audit_event_pid_field
+            ),
+            None
+        );
+    }
+}
diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs
index e9382b66bf75..8f9a7de354b8 100644
--- a/polkadot/node/core/pvf/src/worker_intf.rs
+++
b/polkadot/node/core/pvf/src/worker_intf.rs @@ -245,7 +245,7 @@ pub enum SpawnErr { /// has been terminated. Since the worker is running in another process it is obviously not /// necessary to poll this future to make the worker run, it's only for termination detection. /// -/// This future relies on the fact that a child process's stdout `fd` is closed upon it's +/// This future relies on the fact that a child process's stdout `fd` is closed upon its /// termination. #[pin_project] pub struct WorkerHandle { @@ -270,6 +270,9 @@ impl WorkerHandle { if security_status.can_enable_landlock { args.push("--can-enable-landlock".to_string()); } + if security_status.can_enable_seccomp { + args.push("--can-enable-seccomp".to_string()); + } if security_status.can_unshare_user_namespace_and_change_root { args.push("--can-unshare-user-namespace-and-change-root".to_string()); } diff --git a/polkadot/node/core/pvf/tests/it/adder.rs b/polkadot/node/core/pvf/tests/it/adder.rs index 8bdd09db208a..e8d8a9a6b63e 100644 --- a/polkadot/node/core/pvf/tests/it/adder.rs +++ b/polkadot/node/core/pvf/tests/it/adder.rs @@ -28,7 +28,7 @@ async fn execute_good_block_on_parent() { let block_data = BlockData { state: 0, add: 512 }; - let host = TestHost::new(); + let host = TestHost::new().await; let ret = host .validate_candidate( @@ -56,7 +56,7 @@ async fn execute_good_chain_on_parent() { let mut parent_hash = [0; 32]; let mut last_state = 0; - let host = TestHost::new(); + let host = TestHost::new().await; for (number, add) in (0..10).enumerate() { let parent_head = @@ -98,7 +98,7 @@ async fn execute_bad_block_on_parent() { add: 256, }; - let host = TestHost::new(); + let host = TestHost::new().await; let _err = host .validate_candidate( @@ -117,7 +117,7 @@ async fn execute_bad_block_on_parent() { #[tokio::test] async fn stress_spawn() { - let host = std::sync::Arc::new(TestHost::new()); + let host = std::sync::Arc::new(TestHost::new().await); async fn execute(host: std::sync::Arc) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; @@ -149,9 +149,12 @@ async fn stress_spawn() { // With one worker, run multiple execution jobs serially. They should not conflict. #[tokio::test] async fn execute_can_run_serially() { - let host = std::sync::Arc::new(TestHost::new_with_config(|cfg| { - cfg.execute_workers_max_num = 1; - })); + let host = std::sync::Arc::new( + TestHost::new_with_config(|cfg| { + cfg.execute_workers_max_num = 1; + }) + .await, + ); async fn execute(host: std::sync::Arc) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index cdf8d6eb82d2..a69a488adb98 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-#[cfg(feature = "ci-only-tests")] use assert_matches::assert_matches; use parity_scale_codec::Encode as _; use polkadot_node_core_pvf::{ @@ -24,6 +23,8 @@ use polkadot_node_core_pvf::{ }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::ExecutorParams; +#[cfg(target_os = "linux")] +use rusty_fork::rusty_fork_test; #[cfg(feature = "ci-only-tests")] use polkadot_primitives::ExecutorParam; @@ -43,11 +44,11 @@ struct TestHost { } impl TestHost { - fn new() -> Self { - Self::new_with_config(|_| ()) + async fn new() -> Self { + Self::new_with_config(|_| ()).await } - fn new_with_config(f: F) -> Self + async fn new_with_config(f: F) -> Self where F: FnOnce(&mut Config), { @@ -61,7 +62,7 @@ impl TestHost { execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()); + let (host, task) = start(config, Metrics::default()).await; let _ = tokio::task::spawn(task); Self { cache_dir, host: Mutex::new(host) } } @@ -127,7 +128,7 @@ impl TestHost { #[tokio::test] async fn terminates_on_timeout() { - let host = TestHost::new(); + let host = TestHost::new().await; let start = std::time::Instant::now(); let result = host @@ -153,11 +154,113 @@ async fn terminates_on_timeout() { assert!(duration < TEST_EXECUTION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); } +#[cfg(target_os = "linux")] +fn kill_by_sid_and_name(sid: i32, exe_name: &'static str) { + use procfs::process; + + let all_processes: Vec = process::all_processes() + .expect("Can't read /proc") + .filter_map(|p| match p { + Ok(p) => Some(p), // happy path + Err(e) => match e { + // process vanished during iteration, ignore it + procfs::ProcError::NotFound(_) => None, + x => { + panic!("some unknown error: {}", x); + }, + }, + }) + .collect(); + + for process in all_processes { + if process.stat().unwrap().session == sid && + process.exe().unwrap().to_str().unwrap().contains(exe_name) + { + assert_eq!(unsafe { libc::kill(process.pid(), 9) }, 0); + } + } +} + +// Run these tests in their own processes with rusty-fork. They work by each creating a new session, +// then killing the worker process that matches the session ID and expected worker name. +#[cfg(target_os = "linux")] +rusty_fork_test! { + // What happens when the prepare worker dies in the middle of a job? + #[test] + fn prepare_worker_killed_during_job() { + const PROCESS_NAME: &'static str = "polkadot-prepare-worker"; + + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job in the middle of the timeout. + async { + tokio::time::sleep(TEST_PREPARATION_TIMEOUT / 2).await; + kill_by_sid_and_name(sid, PROCESS_NAME); + } + ); + + assert_matches!(result, Err(PrepareError::IoErr(_))); + }) + } + + // What happens when the execute worker dies in the middle of a job? + #[test] + fn execute_worker_killed_during_job() { + const PROCESS_NAME: &'static str = "polkadot-execute-worker"; + + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
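+ // This is safe to do in-process because rusty-fork runs each of these tests in its
+ // own child process.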
+            let sid = unsafe { libc::setsid() };
+            assert!(sid > 0);
+
+            // Prepare the artifact ahead of time.
+            let binary = halt::wasm_binary_unwrap();
+            host.precheck_pvf(binary, Default::default()).await.unwrap();
+
+            let (result, _) = futures::join!(
+                // Choose a job that would normally take the entire timeout.
+                host.validate_candidate(
+                    binary,
+                    ValidationParams {
+                        block_data: BlockData(Vec::new()),
+                        parent_head: Default::default(),
+                        relay_parent_number: 1,
+                        relay_parent_storage_root: Default::default(),
+                    },
+                    Default::default(),
+                ),
+                // Run a future that kills the job in the middle of the timeout.
+                async {
+                    tokio::time::sleep(TEST_EXECUTION_TIMEOUT / 2).await;
+                    kill_by_sid_and_name(sid, PROCESS_NAME);
+                }
+            );
+
+            assert_matches!(
+                result,
+                Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))
+            );
+        })
+    }
+}
+
 #[cfg(feature = "ci-only-tests")]
 #[tokio::test]
 async fn ensure_parallel_execution() {
     // Run some jobs that do not complete, thus timing out.
-    let host = TestHost::new();
+    let host = TestHost::new().await;
     let execute_pvf_future_1 = host.validate_candidate(
         halt::wasm_binary_unwrap(),
         ValidationParams {
@@ -204,7 +307,8 @@
 async fn execute_queue_doesnt_stall_if_workers_died() {
     let host = TestHost::new_with_config(|cfg| {
         cfg.execute_workers_max_num = 5;
-    });
+    })
+    .await;
 
     // Here we spawn 8 validation jobs for the `halt` PVF and share those between 5 workers. The
     // first five jobs should timeout and the workers killed. For the next 3 jobs a new batch of
@@ -241,7 +345,8 @@
 async fn execute_queue_doesnt_stall_with_varying_executor_params() {
     let host = TestHost::new_with_config(|cfg| {
         cfg.execute_workers_max_num = 2;
-    });
+    })
+    .await;
 
     let executor_params_1 = ExecutorParams::default();
     let executor_params_2 = ExecutorParams::from(&[ExecutorParam::StackLogicalMax(1024)][..]);
@@ -289,7 +394,7 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() {
 // Test that deleting a prepared artifact does not lead to a dispute when we try to execute it.
 #[tokio::test]
 async fn deleting_prepared_artifact_does_not_dispute() {
-    let host = TestHost::new();
+    let host = TestHost::new().await;
     let cache_dir = host.cache_dir.path();
 
     let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap();
@@ -334,7 +439,8 @@
 async fn prepare_can_run_serially() {
     let host = TestHost::new_with_config(|cfg| {
         cfg.prepare_workers_hard_max_num = 1;
-    });
+    })
+    .await;
 
     let _stats = host
         .precheck_pvf(::adder::wasm_binary_unwrap(), Default::default())
diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
index 6a14a3a013d4..0cefeb1f77ca 100644
--- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
+++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
@@ -126,6 +126,19 @@
 with untrusted code does not have unnecessary access to the file-system. This
 provides some protection against attackers accessing sensitive data or
 modifying data on the host machine.
+*Currently this is only supported on Linux.* + + + + + + + + + + + + ### Clearing env vars We clear environment variables before handling untrusted code, because why give diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls index 05abe9ba7368..4a7a66181299 100644 --- a/polkadot/scripts/list-syscalls/execute-worker-syscalls +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -24,10 +24,8 @@ 42 (connect) 45 (recvfrom) 46 (sendmsg) -53 (socketpair) 56 (clone) 60 (exit) -61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -52,23 +50,16 @@ 200 (tkill) 202 (futex) 204 (sched_getaffinity) -213 (epoll_create) 217 (getdents64) 218 (set_tid_address) 228 (clock_gettime) 230 (clock_nanosleep) 231 (exit_group) -232 (epoll_wait) -233 (epoll_ctl) 257 (openat) 262 (newfstatat) 263 (unlinkat) 272 (unshare) 273 (set_robust_list) -281 (epoll_pwait) -284 (eventfd) -290 (eventfd2) -291 (epoll_create1) 302 (prlimit64) 318 (getrandom) 319 (memfd_create) diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls index f1597f206756..cab58e06692b 100644 --- a/polkadot/scripts/list-syscalls/prepare-worker-syscalls +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -24,10 +24,8 @@ 42 (connect) 45 (recvfrom) 46 (sendmsg) -53 (socketpair) 56 (clone) 60 (exit) -61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -54,23 +52,16 @@ 202 (futex) 203 (sched_setaffinity) 204 (sched_getaffinity) -213 (epoll_create) 217 (getdents64) 218 (set_tid_address) 228 (clock_gettime) 230 (clock_nanosleep) 231 (exit_group) -232 (epoll_wait) -233 (epoll_ctl) 257 (openat) 262 (newfstatat) 263 (unlinkat) 272 (unshare) 273 (set_robust_list) -281 (epoll_pwait) -284 (eventfd) -290 (eventfd2) -291 (epoll_create1) 302 (prlimit64) 309 (getcpu) 318 (getrandom) From 18ad449015e243d148d48688f2492d435fc75522 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 31 Oct 2023 12:08:32 +0100 Subject: [PATCH 35/69] Contracts migration update (#2091) Restore fix from #2077 --- substrate/frame/contracts/src/migration.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 1873ef2765b1..6d61cb6b1e1a 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -312,10 +312,7 @@ impl OnRuntimeUpgrade for Migration>::current_storage_version(); if on_chain_version == current_version { - log::warn!( - target: LOG_TARGET, - "No upgrade: Please remove this migration from your Migrations tuple" - ) + return Ok(Default::default()) } log::debug!( @@ -324,9 +321,11 @@ impl OnRuntimeUpgrade for Migration>::name(), on_chain_version, current_version ); - if !T::Migrations::is_upgrade_supported(on_chain_version, current_version) { - log::warn!(target: LOG_TARGET, "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)") - } + ensure!( + T::Migrations::is_upgrade_supported(on_chain_version, current_version), + "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" + ); + Ok(Default::default()) } From d85c1d9117293ef7215ebdcbf86fbe8eae429b56 Mon Sep 17 00:00:00 2001 From: Rahul Subramaniyam <78006270+rahulksnv@users.noreply.github.com> Date: Tue, 31 Oct 2023 05:11:00 -0700 Subject: [PATCH 36/69] Add test to demonstrate the failure scenario (#1999) The change adds a test 
to show the failure scenario that caused #1812 to be rolled back (more context: https://github.com/paritytech/polkadot-sdk/issues/493#issuecomment-1772009924) Summary of the scenario: 1. Node has finished downloading up to block 1000 from the peers, from the canonical chain. 2. Peers are undergoing re-org around this time. One of the peers has switched to a non-canonical chain, announces block 1001 from that chain 3. Node downloads 1001 from the peer, and tries to import which would fail (as we don't have the parent block 1000 from the other chain) --------- Co-authored-by: Dmitry Markin --- Cargo.lock | 2 +- substrate/client/network/sync/src/lib.rs | 125 +++++++++++++++++++++++ 2 files changed, 126 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c368a957764e..8315bebc6830 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18466,7 +18466,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.8", + "rustix 0.38.21", "windows-sys 0.48.0", ] diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 10eaa2450518..0c8e8a104e2a 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -3364,4 +3364,129 @@ mod test { pending_responses.remove(&peers[1]); assert_eq!(pending_responses.len(), 0); } + + /// The test demonstrates https://github.com/paritytech/polkadot-sdk/issues/2094. + /// TODO: convert it into desired behavior test once the issue is fixed (see inline comments). + /// The issue: we currently rely on block numbers instead of block hash + /// to download blocks from peers. As a result, we can end up with blocks + /// from different forks as shown by the test. + #[test] + #[should_panic] + fn request_across_forks() { + sp_tracing::try_init_simple(); + + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..100).map(|_| build_block(&mut client, None, false)).collect::>(); + + let fork_a_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut fork_blocks = blocks[..] + .into_iter() + .inspect(|b| { + assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); + block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() + }) + .cloned() + .collect::>(); + for _ in 0..10 { + fork_blocks.push(build_block(&mut client, None, false)); + } + fork_blocks + }; + + let fork_b_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut fork_blocks = blocks[..] + .into_iter() + .inspect(|b| { + assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); + block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() + }) + .cloned() + .collect::>(); + for _ in 0..10 { + fork_blocks.push(build_block(&mut client, None, true)); + } + fork_blocks + }; + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + ProtocolName::from("test-block-announce-protocol"), + 5, + 64, + None, + chain_sync_network_handle, + ) + .unwrap(); + + // Add the peers, all at the common ancestor 100. 
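+		// Registering both peers with the canonical tip as their best block means
+		// every announcement handled below is evaluated against this shared state.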
+ let common_block = blocks.last().unwrap(); + let peer_id1 = PeerId::random(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + .unwrap(); + let peer_id2 = PeerId::random(); + sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()) + .unwrap(); + + // Peer 1 announces 107 from fork 1, 100-107 get downloaded. + { + let block = (&fork_a_blocks[106]).clone(); + let peer = peer_id1; + log::trace!(target: LOG_TARGET, "<1> {peer} announces from fork 1"); + send_block_announce(block.header().clone(), peer, &mut sync); + let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 7, &peer); + let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); + resp_blocks.reverse(); + let response = create_block_response(resp_blocks.clone()); + let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 7_usize + ),); + assert_eq!(sync.best_queued_number, 107); + assert_eq!(sync.best_queued_hash, block.hash()); + assert!(sync.is_known(&block.header.parent_hash())); + } + + // Peer 2 also announces 107 from fork 1. + { + let prev_best_number = sync.best_queued_number; + let prev_best_hash = sync.best_queued_hash; + let peer = peer_id2; + log::trace!(target: LOG_TARGET, "<2> {peer} announces from fork 1"); + for i in 100..107 { + let block = (&fork_a_blocks[i]).clone(); + send_block_announce(block.header().clone(), peer, &mut sync); + assert!(sync.block_requests().is_empty()); + } + assert_eq!(sync.best_queued_number, prev_best_number); + assert_eq!(sync.best_queued_hash, prev_best_hash); + } + + // Peer 2 undergoes reorg, announces 108 from fork 2, gets downloaded even though we + // don't have the parent from fork 2. + { + let block = (&fork_b_blocks[107]).clone(); + let peer = peer_id2; + log::trace!(target: LOG_TARGET, "<3> {peer} announces from fork 2"); + send_block_announce(block.header().clone(), peer, &mut sync); + // TODO: when the issue is fixed, this test can be changed to test the + // expected behavior instead. The needed changes would be: + // 1. Remove the `#[should_panic]` directive + // 2. These should be changed to check that sync.block_requests().is_empty(), after the + // block is announced. + let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); + let response = create_block_response(vec![block.clone()]); + let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 1_usize + ),); + assert!(sync.is_known(&block.header.parent_hash())); + } + } } From 3ae86ae075ac5eb9b80f89f09bd7f4a63f97c582 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 31 Oct 2023 14:07:10 +0100 Subject: [PATCH 37/69] check-each-crate: Do not reference crate to check by name (#2098) This pull request changes how `check-each-crate.py` is working. Instead of passing the name of the crate via `-p`, we now jump into the directory of the crate and call there `cargo check`. This should fix issues like https://github.com/paritytech/polkadot-sdk/issues/2013 where a crate is present twice in the `Cargo.lock`. Besides that it also changes `core/Cargo.toml` to not always pull in bandersnatch. 
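The core of the new approach, sketched here in Rust for illustration (the script itself is Python; the `check_crate` helper and its error handling are hypothetical, not part of this patch): checking from inside the crate's own directory instead of selecting it by `-p <name>` removes any ambiguity when two crates in `Cargo.lock` share a name.

```rust
use std::process::Command;

/// Run `cargo check --locked` from inside the crate's own directory.
/// The path uniquely identifies the crate even when its name occurs twice
/// in `Cargo.lock`, which is exactly what `-p <name>` could not guarantee.
fn check_crate(crate_path: &str) -> Result<(), String> {
    let status = Command::new("cargo")
        .args(["check", "--locked"])
        .current_dir(crate_path)
        .status()
        .map_err(|e| e.to_string())?;
    if status.success() {
        Ok(())
    } else {
        Err(format!("`cargo check` failed in {crate_path}"))
    }
}
```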
--- .gitlab/check-each-crate.py | 10 +++++++--- substrate/primitives/core/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.gitlab/check-each-crate.py b/.gitlab/check-each-crate.py index adad4f5bd583..da2eaad36c52 100755 --- a/.gitlab/check-each-crate.py +++ b/.gitlab/check-each-crate.py @@ -19,7 +19,11 @@ crates = [] for line in output.splitlines(): if line != b"": - crates.append(line.decode('utf8').split(" ")[0]) + line = line.decode('utf8').split(" ") + crate_name = line[0] + # The crate path is always the last element in the line. + crate_path = line[len(line) - 1].replace("(", "").replace(")", "") + crates.append((crate_name, crate_path)) # Make the list unique and sorted crates = list(set(crates)) @@ -49,9 +53,9 @@ for i in range(0, crates_per_group + overflow_crates): crate = crates_per_group * target_group + i - print(f"Checking {crates[crate]}", file=sys.stderr) + print(f"Checking {crates[crate][0]}", file=sys.stderr) - res = subprocess.run(["cargo", "check", "--locked", "-p", crates[crate]]) + res = subprocess.run(["cargo", "check", "--locked"], cwd = crates[crate][1]) if res.returncode != 0: sys.exit(1) diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 1e8a353f419b..7f329832efd6 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -76,7 +76,7 @@ bench = false default = [ "std" ] std = [ "array-bytes", - "bandersnatch_vrfs/getrandom", + "bandersnatch_vrfs?/getrandom", "bip39/rand", "bip39/std", "blake2/std", From c38aae628b437dd7bd0e2178dc6e73f685b424f4 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 31 Oct 2023 14:59:15 +0100 Subject: [PATCH 38/69] Elliptic curves utilities refactory (#2068) - Usage the new published [arkworks-extensions](https://github.com/paritytech/arkworks-extensions) crates. Hooks are internally defined to jump into the proper host functions. 
- Conditional compilation of each curve (gated by feature with curve name) - Separation in smaller host functions sets, divided by curve (fits nicely with prev point) --- Cargo.lock | 85 +++++ .../primitives/crypto/ec-utils/Cargo.toml | 63 ++-- .../crypto/ec-utils/src/bls12_377.rs | 205 ++++++++++++ .../crypto/ec-utils/src/bls12_381.rs | 195 ++++++++++++ .../primitives/crypto/ec-utils/src/bw6_761.rs | 186 +++++++++++ .../crypto/ec-utils/src/ed_on_bls12_377.rs | 88 ++++++ .../src/ed_on_bls12_381_bandersnatch.rs | 153 +++++++++ .../primitives/crypto/ec-utils/src/lib.rs | 295 ++---------------- .../primitives/crypto/ec-utils/src/utils.rs | 151 +++++---- 9 files changed, 1059 insertions(+), 362 deletions(-) create mode 100644 substrate/primitives/crypto/ec-utils/src/bls12_377.rs create mode 100644 substrate/primitives/crypto/ec-utils/src/bls12_381.rs create mode 100644 substrate/primitives/crypto/ec-utils/src/bw6_761.rs create mode 100644 substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs create mode 100644 substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs diff --git a/Cargo.lock b/Cargo.lock index 8315bebc6830..8cedf9f2c742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -327,6 +327,18 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bls12-377-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20c7021f180a0cbea0380eba97c2af3c57074cdaffe0eef7e840e1c9f2841e55" +dependencies = [ + "ark-bls12-377", + "ark-ec", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-bls12-381" version = "0.4.0" @@ -339,6 +351,20 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bls12-381-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1dc4b3d08f19e8ec06e949712f95b8361e43f1391d94f65e4234df03480631c" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-models-ext", + "ark-serialize", + "ark-std", +] + [[package]] name = "ark-bw6-761" version = "0.4.0" @@ -351,6 +377,19 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bw6-761-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccee5fba47266f460067588ee1bf070a9c760bf2050c1c509982c5719aadb4f2" +dependencies = [ + "ark-bw6-761", + "ark-ec", + "ark-ff", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-ec" version = "0.4.2" @@ -365,6 +404,7 @@ dependencies = [ "hashbrown 0.13.2", "itertools 0.10.5", "num-traits", + "rayon", "zeroize", ] @@ -380,6 +420,19 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-ed-on-bls12-377-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524a4fb7540df2e1a8c2e67a83ba1d1e6c3947f4f9342cc2359fc2e789ad731d" +dependencies = [ + "ark-ec", + "ark-ed-on-bls12-377", + "ark-ff", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-ed-on-bls12-381-bandersnatch" version = "0.4.0" @@ -392,6 +445,19 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-ed-on-bls12-381-bandersnatch-ext" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d15185f1acb49a07ff8cbe5f11a1adc5a93b19e211e325d826ae98e98e124346" +dependencies = [ + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-models-ext", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.4.2" @@ -435,6 +501,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-models-ext" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e9eab5d4b5ff2f228b763d38442adc9b084b0a465409b059fac5c2308835ec2" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", +] + [[package]] name = "ark-poly" version = "0.4.2" @@ -508,6 +587,7 @@ checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand 0.8.5", + "rayon", ] [[package]] @@ -17179,11 +17259,16 @@ name = "sp-crypto-ec-utils" version = "0.4.0" dependencies = [ "ark-bls12-377", + "ark-bls12-377-ext", "ark-bls12-381", + "ark-bls12-381-ext", "ark-bw6-761", + "ark-bw6-761-ext", "ark-ec", "ark-ed-on-bls12-377", + "ark-ed-on-bls12-377-ext", "ark-ed-on-bls12-381-bandersnatch", + "ark-ed-on-bls12-381-bandersnatch-ext", "ark-scale", "sp-runtime-interface", "sp-std", diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index e091385071c9..651fc96d7ac1 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -12,26 +12,53 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-ec = { version = "0.4.2", default-features = false } -ark-bls12-377 = { version = "0.4.0", features = ["curve"], default-features = false } -ark-bls12-381 = { version = "0.4.0", features = ["curve"], default-features = false } -ark-bw6-761 = { version = "0.4.0", default-features = false } -ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false } -ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false } -ark-scale = { version = "0.0.11", features = ["hazmat"], default-features = false } -sp-runtime-interface = { path = "../../runtime-interface", default-features = false} -sp-std = { path = "../../std", default-features = false } +ark-ec = { version = "0.4.2", default-features = false, optional = true } +ark-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } +ark-bls12-377 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } +ark-bls12-381-ext = { version = "0.4.1", default-features = false, optional = true } +ark-bls12-381 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } +ark-bw6-761-ext = { version = "0.4.1", default-features = false, optional = true } +ark-bw6-761 = { version = "0.4.0", default-features = false, optional = true } +ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = false, optional = true } +ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false, optional = true } +ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } +ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false, optional = true } +ark-scale = { version = "0.0.11", default-features = false, features = ["hazmat"], optional = true } +sp-runtime-interface = { path = "../../runtime-interface", default-features = false, optional = true } +sp-std = { path = "../../std", default-features = false, optional = true } [features] default = [ "std" ] std = [ - "ark-bls12-377/std", - "ark-bls12-381/std", - "ark-bw6-761/std", - "ark-ec/std", - "ark-ed-on-bls12-377/std", - "ark-ed-on-bls12-381-bandersnatch/std", - "ark-scale/std", - "sp-runtime-interface/std", - "sp-std/std", + "ark-bls12-377-ext?/std", + "ark-bls12-377?/std", + "ark-bls12-381-ext?/std", + "ark-bls12-381?/std", + "ark-bw6-761-ext?/std", 
+ "ark-bw6-761?/std", + "ark-ec?/parallel", + "ark-ed-on-bls12-377-ext?/std", + "ark-ed-on-bls12-377?/std", + "ark-ed-on-bls12-381-bandersnatch-ext?/std", + "ark-ed-on-bls12-381-bandersnatch?/std", + "ark-scale?/std", + "sp-runtime-interface?/std", + "sp-std?/std", +] +common = [ "ark-ec", "ark-scale", "sp-runtime-interface", "sp-std" ] +bls12-377 = [ "ark-bls12-377", "ark-bls12-377-ext", "common" ] +bls12-381 = [ "ark-bls12-381", "ark-bls12-381-ext", "common" ] +bw6-761 = [ "ark-bw6-761", "ark-bw6-761-ext", "common" ] +ed-on-bls12-377 = [ "ark-ed-on-bls12-377", "ark-ed-on-bls12-377-ext", "common" ] +ed-on-bls12-381-bandersnatch = [ + "ark-ed-on-bls12-381-bandersnatch", + "ark-ed-on-bls12-381-bandersnatch-ext", + "common", +] +all-curves = [ + "bls12-377", + "bls12-381", + "bw6-761", + "ed-on-bls12-377", + "ed-on-bls12-381-bandersnatch", ] diff --git a/substrate/primitives/crypto/ec-utils/src/bls12_377.rs b/substrate/primitives/crypto/ec-utils/src/bls12_377.rs new file mode 100644 index 000000000000..8f19a2c4a191 --- /dev/null +++ b/substrate/primitives/crypto/ec-utils/src/bls12_377.rs @@ -0,0 +1,205 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! *BLS12-377* types and host functions. + +use crate::utils; +use ark_bls12_377_ext::CurveHooks; +use ark_ec::{pairing::Pairing, CurveConfig}; +use sp_runtime_interface::runtime_interface; +use sp_std::vec::Vec; + +/// First pairing group definitions. +pub mod g1 { + pub use ark_bls12_377_ext::g1::{ + G1_GENERATOR_X, G1_GENERATOR_Y, TE_GENERATOR_X, TE_GENERATOR_Y, + }; + /// Group configuration. + pub type Config = ark_bls12_377_ext::g1::Config; + /// Short Weierstrass form point affine representation. + pub type G1Affine = ark_bls12_377_ext::g1::G1Affine; + /// Short Weierstrass form point projective representation. + pub type G1Projective = ark_bls12_377_ext::g1::G1Projective; + /// Short Weierstrass form point affine representation. + pub type G1SWAffine = ark_bls12_377_ext::g1::G1SWAffine; + /// Short Weierstrass form point projective representation. + pub type G1SWProjective = ark_bls12_377_ext::g1::G1SWProjective; + /// Twisted Edwards form point affine representation. + pub type G1TEAffine = ark_bls12_377_ext::g1::G1TEAffine; + /// Twisted Edwards form point projective representation. + pub type G1TEProjective = ark_bls12_377_ext::g1::G1TEProjective; +} + +/// Second pairing group definitions. +pub mod g2 { + pub use ark_bls12_377_ext::g2::{ + G2_GENERATOR_X, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_Y, G2_GENERATOR_Y_C0, + G2_GENERATOR_Y_C1, + }; + /// Group configuration. + pub type Config = ark_bls12_377_ext::g2::Config; + /// Short Weierstrass form point affine representation. + pub type G2Affine = ark_bls12_377_ext::g2::G2Affine; + /// Short Weierstrass form point projective representation. 
+ pub type G2Projective = ark_bls12_377_ext::g2::G2Projective; +} + +pub use self::{ + g1::{Config as G1Config, G1Affine, G1Projective}, + g2::{Config as G2Config, G2Affine, G2Projective}, +}; + +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Configuration for *BLS12-377* curve. +pub type Config = ark_bls12_377_ext::Config; + +/// *BLS12-377* definition. +/// +/// A generic *BLS12* model specialized with *BLS12-377* configuration. +pub type Bls12_377 = ark_bls12_377_ext::Bls12_377; + +impl CurveHooks for HostHooks { + fn bls12_377_multi_miller_loop( + g1: impl Iterator::G1Prepared>, + g2: impl Iterator::G2Prepared>, + ) -> Result<::TargetField, ()> { + let g1 = utils::encode(g1.collect::>()); + let g2 = utils::encode(g2.collect::>()); + let res = host_calls::bls12_377_multi_miller_loop(g1, g2).unwrap_or_default(); + utils::decode(res) + } + + fn bls12_377_final_exponentiation( + target: ::TargetField, + ) -> Result<::TargetField, ()> { + let target = utils::encode(target); + let res = host_calls::bls12_377_final_exponentiation(target).unwrap_or_default(); + utils::decode(res) + } + + fn bls12_377_msm_g1( + bases: &[G1Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_377_msm_g1(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_377_msm_g2( + bases: &[G2Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_377_msm_g2(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_377_mul_projective_g1( + base: &G1Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_377_mul_projective_g1(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_377_mul_projective_g2( + base: &G2Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_377_mul_projective_g2(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } +} + +/// Interfaces for working with *Arkworks* *BLS12-377* elliptic curve related types +/// from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Pairing multi Miller loop for *BLS12-377*. + /// + /// - Receives encoded: + /// - `a`: `ArkScale>`. + /// - `b`: `ArkScale>`. + /// - Returns encoded: `ArkScale`. + fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + utils::multi_miller_loop::(a, b) + } + + /// Pairing final exponentiation for *BLS12-377.* + /// + /// - Receives encoded: `ArkScale`. + /// - Returns encoded: `ArkScale`. + fn bls12_377_final_exponentiation(f: Vec) -> Result, ()> { + utils::final_exponentiation::(f) + } + + /// Multi scalar multiplication on *G1* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. 
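+	/// - Note: the decoded `bases` and `scalars` sequences must be of equal
+	///   length, otherwise the multi scalar multiplication returns an error.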
+ fn bls12_377_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Multi scalar multiplication on *G2* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Projective multiplication on *G1* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } + + /// Projective multiplication on *G2* for *BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } +} diff --git a/substrate/primitives/crypto/ec-utils/src/bls12_381.rs b/substrate/primitives/crypto/ec-utils/src/bls12_381.rs new file mode 100644 index 000000000000..99a0289b7ad2 --- /dev/null +++ b/substrate/primitives/crypto/ec-utils/src/bls12_381.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! *BLS12-381* types and host functions. + +use crate::utils; +use ark_bls12_381_ext::CurveHooks; +use ark_ec::{pairing::Pairing, CurveConfig}; +use sp_runtime_interface::runtime_interface; +use sp_std::vec::Vec; + +/// First pairing group definitions. +pub mod g1 { + pub use ark_bls12_381_ext::g1::{BETA, G1_GENERATOR_X, G1_GENERATOR_Y}; + /// Group configuration. + pub type Config = ark_bls12_381_ext::g1::Config; + /// Short Weierstrass form point affine representation. + pub type G1Affine = ark_bls12_381_ext::g1::G1Affine; + /// Short Weierstrass form point projective representation. + pub type G1Projective = ark_bls12_381_ext::g1::G1Projective; +} + +/// Second pairing group definitions. +pub mod g2 { + pub use ark_bls12_381_ext::g2::{ + G2_GENERATOR_X, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_Y, G2_GENERATOR_Y_C0, + G2_GENERATOR_Y_C1, + }; + /// Group configuration. + pub type Config = ark_bls12_381_ext::g2::Config; + /// Short Weierstrass form point affine representation. + pub type G2Affine = ark_bls12_381_ext::g2::G2Affine; + /// Short Weierstrass form point projective representation. + pub type G2Projective = ark_bls12_381_ext::g2::G2Projective; +} + +pub use self::{ + g1::{Config as G1Config, G1Affine, G1Projective}, + g2::{Config as G2Config, G2Affine, G2Projective}, +}; + +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Configuration for *BLS12-381* curve. 
+pub type Config = ark_bls12_381_ext::Config; + +/// *BLS12-381* definition. +/// +/// A generic *BLS12* model specialized with *BLS12-381* configuration. +pub type Bls12_381 = ark_bls12_381_ext::Bls12_381; + +impl CurveHooks for HostHooks { + fn bls12_381_multi_miller_loop( + g1: impl Iterator::G1Prepared>, + g2: impl Iterator::G2Prepared>, + ) -> Result<::TargetField, ()> { + let g1 = utils::encode(g1.collect::>()); + let g2 = utils::encode(g2.collect::>()); + let res = host_calls::bls12_381_multi_miller_loop(g1, g2).unwrap_or_default(); + utils::decode(res) + } + + fn bls12_381_final_exponentiation( + target: ::TargetField, + ) -> Result<::TargetField, ()> { + let target = utils::encode(target); + let res = host_calls::bls12_381_final_exponentiation(target).unwrap_or_default(); + utils::decode(res) + } + + fn bls12_381_msm_g1( + bases: &[G1Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_381_msm_g1(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_381_msm_g2( + bases: &[G2Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bls12_381_msm_g2(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_381_mul_projective_g1( + base: &G1Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_381_mul_projective_g1(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bls12_381_mul_projective_g2( + base: &G2Projective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bls12_381_mul_projective_g2(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } +} + +/// Interfaces for working with *Arkworks* *BLS12-381* elliptic curve related types +/// from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Pairing multi Miller loop for *BLS12-381*. + /// + /// - Receives encoded: + /// - `a`: `ArkScale>`. + /// - `b`: `ArkScale>`. + /// - Returns encoded: `ArkScale`. + fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + utils::multi_miller_loop::(a, b) + } + + /// Pairing final exponentiation for *BLS12-381*. + /// + /// - Receives encoded: `ArkScale<`. + /// - Returns encoded: `ArkScale<` + fn bls12_381_final_exponentiation(f: Vec) -> Result, ()> { + utils::final_exponentiation::(f) + } + + /// Multi scalar multiplication on *G1* for *BLS12-381* + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_381_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Multi scalar multiplication on *G2* for *BLS12-381* + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. 
+ fn bls12_381_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Projective multiplication on *G1* for *BLS12-381*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } + + /// Projective multiplication on *G2* for *BLS12-381* + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } +} diff --git a/substrate/primitives/crypto/ec-utils/src/bw6_761.rs b/substrate/primitives/crypto/ec-utils/src/bw6_761.rs new file mode 100644 index 000000000000..a68abf6e43e0 --- /dev/null +++ b/substrate/primitives/crypto/ec-utils/src/bw6_761.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! *BW6-761* types and host functions. + +use crate::utils; +use ark_bw6_761_ext::CurveHooks; +use ark_ec::{pairing::Pairing, CurveConfig}; +use sp_runtime_interface::runtime_interface; +use sp_std::vec::Vec; + +/// First pairing group definitions. +pub mod g1 { + pub use ark_bw6_761_ext::g1::{G1_GENERATOR_X, G1_GENERATOR_Y}; + /// Group configuration. + pub type Config = ark_bw6_761_ext::g1::Config; + /// Short Weierstrass form point affine representation. + pub type G1Affine = ark_bw6_761_ext::g1::G1Affine; + /// Short Weierstrass form point projective representation. + pub type G1Projective = ark_bw6_761_ext::g1::G1Projective; +} + +/// Second pairing group definitions. +pub mod g2 { + pub use ark_bw6_761_ext::g2::{G2_GENERATOR_X, G2_GENERATOR_Y}; + /// Group configuration. + pub type Config = ark_bw6_761_ext::g2::Config; + /// Short Weierstrass form point affine representation. + pub type G2Affine = ark_bw6_761_ext::g2::G2Affine; + /// Short Weierstrass form point projective representation. + pub type G2Projective = ark_bw6_761_ext::g2::G2Projective; +} + +pub use self::{ + g1::{Config as G1Config, G1Affine, G1Projective}, + g2::{Config as G2Config, G2Affine, G2Projective}, +}; + +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Configuration for *BW6-361* curve. +pub type Config = ark_bw6_761_ext::Config; + +/// *BW6-361* definition. +/// +/// A generic *BW6* model specialized with *BW6-761* configuration. 
+pub type BW6_761 = ark_bw6_761_ext::BW6_761; + +impl CurveHooks for HostHooks { + fn bw6_761_multi_miller_loop( + g1: impl Iterator::G1Prepared>, + g2: impl Iterator::G2Prepared>, + ) -> Result<::TargetField, ()> { + let g1 = utils::encode(g1.collect::>()); + let g2 = utils::encode(g2.collect::>()); + let res = host_calls::bw6_761_multi_miller_loop(g1, g2).unwrap_or_default(); + utils::decode(res) + } + + fn bw6_761_final_exponentiation( + target: ::TargetField, + ) -> Result<::TargetField, ()> { + let target = utils::encode(target); + let res = host_calls::bw6_761_final_exponentiation(target).unwrap_or_default(); + utils::decode(res) + } + + fn bw6_761_msm_g1( + bases: &[G1Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bw6_761_msm_g1(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bw6_761_msm_g2( + bases: &[G2Affine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::bw6_761_msm_g2(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bw6_761_mul_projective_g1(base: &G1Projective, scalar: &[u64]) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bw6_761_mul_projective_g1(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn bw6_761_mul_projective_g2(base: &G2Projective, scalar: &[u64]) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::bw6_761_mul_projective_g2(base, scalar).unwrap_or_default(); + utils::decode_proj_sw(res) + } +} + +/// Interfaces for working with *Arkworks* *BW6-761* elliptic curve related types +/// from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Pairing multi Miller loop for *BW6-761*. + /// + /// - Receives encoded: + /// - `a: ArkScale>`. + /// - `b: ArkScale>`. + /// - Returns encoded: `ArkScale`. + fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { + utils::multi_miller_loop::(a, b) + } + + /// Pairing final exponentiation for *BW6-761*. + /// + /// - Receives encoded: `ArkScale`. + /// - Returns encoded: `ArkScale`. + fn bw6_761_final_exponentiation(f: Vec) -> Result, ()> { + utils::final_exponentiation::(f) + } + + /// Multi scalar multiplication on *G1* for *BW6-761*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Multi scalar multiplication on *G2* for *BW6-761*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Projective multiplication on *G1* for *BW6-761*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. 
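+	/// - Note: `scalar` is expected to hold the little-endian `u64` limbs of
+	///   the multiplier, matching Arkworks' `mul_projective` convention.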
+ fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } + + /// Projective multiplication on *G2* for *BW6-761*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn bw6_761_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } +} diff --git a/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs new file mode 100644 index 000000000000..a03be41b8542 --- /dev/null +++ b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_377.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! *Ed-on-BLS12-377* types and host functions. + +use crate::utils; +use ark_ec::CurveConfig; +use ark_ed_on_bls12_377_ext::CurveHooks; +use sp_runtime_interface::runtime_interface; +use sp_std::vec::Vec; + +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Group configuration. +pub type EdwardsConfig = ark_ed_on_bls12_377_ext::EdwardsConfig; +/// Twisted Edwards form point affine representation. +pub type EdwardsAffine = ark_ed_on_bls12_377_ext::EdwardsAffine; +/// Twisted Edwards form point projective representation. +pub type EdwardsProjective = ark_ed_on_bls12_377_ext::EdwardsProjective; + +impl CurveHooks for HostHooks { + fn ed_on_bls12_377_msm( + bases: &[EdwardsAffine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = host_calls::ed_on_bls12_377_te_msm(bases, scalars).unwrap_or_default(); + utils::decode_proj_te(res) + } + + fn ed_on_bls12_377_mul_projective( + base: &EdwardsProjective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_te(base); + let scalar = utils::encode(scalar); + let res = host_calls::ed_on_bls12_377_te_mul_projective(base, scalar).unwrap_or_default(); + utils::decode_proj_te(res) + } +} + +/// Interfaces for working with *Arkworks* *Ed-on-BLS12-377* elliptic curve +/// related types from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Twisted Edwards multi scalar multiplication for *Ed-on-BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. 
+ fn ed_on_bls12_377_te_msm(bases: Vec, scalars: Vec) -> Result, ()> { + utils::msm_te::(bases, scalars) + } + + /// Twisted Edwards projective multiplication for *Ed-on-BLS12-377*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_377_te_mul_projective(base: Vec, scalar: Vec) -> Result, ()> { + utils::mul_projective_te::(base, scalar) + } +} diff --git a/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs new file mode 100644 index 000000000000..9d63f3587652 --- /dev/null +++ b/substrate/primitives/crypto/ec-utils/src/ed_on_bls12_381_bandersnatch.rs @@ -0,0 +1,153 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Elliptic Curves host functions to handle some of the *Arkworks* *Ed-on-BLS12-381-Bandersnatch* +//! computationally expensive operations. + +use crate::utils; +use ark_ec::CurveConfig; +use ark_ed_on_bls12_381_bandersnatch_ext::CurveHooks; +use sp_runtime_interface::runtime_interface; +use sp_std::vec::Vec; + +/// Curve hooks jumping into [`host_calls`] host functions. +#[derive(Copy, Clone)] +pub struct HostHooks; + +/// Group configuration. +pub type BandersnatchConfig = ark_ed_on_bls12_381_bandersnatch_ext::BandersnatchConfig; +/// Group configuration for Twisted Edwards form (equal to [`BandersnatchConfig`]). +pub type EdwardsConfig = ark_ed_on_bls12_381_bandersnatch_ext::EdwardsConfig; +/// Twisted Edwards form point affine representation. +pub type EdwardsAffine = ark_ed_on_bls12_381_bandersnatch_ext::EdwardsAffine; +/// Twisted Edwards form point projective representation. +pub type EdwardsProjective = ark_ed_on_bls12_381_bandersnatch_ext::EdwardsProjective; +/// Group configuration for Short Weierstrass form (equal to [`BandersnatchConfig`]). +pub type SWConfig = ark_ed_on_bls12_381_bandersnatch_ext::SWConfig; +/// Short Weierstrass form point affine representation. +pub type SWAffine = ark_ed_on_bls12_381_bandersnatch_ext::SWAffine; +/// Short Weierstrass form point projective representation. 
+pub type SWProjective = ark_ed_on_bls12_381_bandersnatch_ext::SWProjective; + +impl CurveHooks for HostHooks { + fn ed_on_bls12_381_bandersnatch_te_msm( + bases: &[EdwardsAffine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = + host_calls::ed_on_bls12_381_bandersnatch_te_msm(bases, scalars).unwrap_or_default(); + utils::decode_proj_te(res) + } + + fn ed_on_bls12_381_bandersnatch_te_mul_projective( + base: &EdwardsProjective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_te(base); + let scalar = utils::encode(scalar); + let res = host_calls::ed_on_bls12_381_bandersnatch_te_mul_projective(base, scalar) + .unwrap_or_default(); + utils::decode_proj_te(res) + } + + fn ed_on_bls12_381_bandersnatch_sw_msm( + bases: &[SWAffine], + scalars: &[::ScalarField], + ) -> Result { + let bases = utils::encode(bases); + let scalars = utils::encode(scalars); + let res = + host_calls::ed_on_bls12_381_bandersnatch_sw_msm(bases, scalars).unwrap_or_default(); + utils::decode_proj_sw(res) + } + + fn ed_on_bls12_381_bandersnatch_sw_mul_projective( + base: &SWProjective, + scalar: &[u64], + ) -> Result { + let base = utils::encode_proj_sw(base); + let scalar = utils::encode(scalar); + let res = host_calls::ed_on_bls12_381_bandersnatch_sw_mul_projective(base, scalar) + .unwrap_or_default(); + utils::decode_proj_sw(res) + } +} + +/// Interfaces for working with *Arkworks* *Ed-on-BLS12-381-Bandersnatch* elliptic curve +/// related types from within the runtime. +/// +/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, +/// with `ark_scale::{ArkScale, ArkScaleProjective}`. +/// +/// `ArkScale`'s `Usage` generic parameter is expected to be set to "not-validated" +/// and "not-compressed". +#[runtime_interface] +pub trait HostCalls { + /// Twisted Edwards multi scalar multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_te_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + utils::msm_te::(bases, scalars) + } + + /// Twisted Edwards projective multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_te_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + utils::mul_projective_te::(base, scalar) + } + + /// Short Weierstrass multi scalar multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `bases`: `ArkScale>`. + /// - `scalars`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. + fn ed_on_bls12_381_bandersnatch_sw_msm( + bases: Vec, + scalars: Vec, + ) -> Result, ()> { + utils::msm_sw::(bases, scalars) + } + + /// Short Weierstrass projective multiplication for *Ed-on-BLS12-381-Bandersnatch*. + /// + /// - Receives encoded: + /// - `base`: `ArkScaleProjective`. + /// - `scalar`: `ArkScale>`. + /// - Returns encoded: `ArkScaleProjective`. 
+ fn ed_on_bls12_381_bandersnatch_sw_mul_projective( + base: Vec, + scalar: Vec, + ) -> Result, ()> { + utils::mul_projective_sw::(base, scalar) + } +} diff --git a/substrate/primitives/crypto/ec-utils/src/lib.rs b/substrate/primitives/crypto/ec-utils/src/lib.rs index c5cc85077391..e3aea98faa1e 100644 --- a/substrate/primitives/crypto/ec-utils/src/lib.rs +++ b/substrate/primitives/crypto/ec-utils/src/lib.rs @@ -15,272 +15,39 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Elliptic Curves host functions which may be used to handle some of the *Arkworks* -//! computationally expensive operations. +//! Elliptic curves which are mostly compatible with *Arkworks* library +//! mostly useful in non-native contexts. +//! +//! The definitions make use of host functions to offload the non-native +//! computational environment from the some of the most computationally +//! expensive operations by internally leveraging the +//! [arkworks-extensions](https://github.com/paritytech/arkworks-extensions) +//! library. +//! +//! The exported types are organized and named in a way that mirrors the structure +//! of the types in the original Arkworks library. This design choice aims to make +//! it easier for users already familiar with the library to understand and utilize +//! the exported types effectively. #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "bls12-377")] +pub mod bls12_377; +#[cfg(feature = "bls12-381")] +pub mod bls12_381; +#[cfg(feature = "bw6-761")] +pub mod bw6_761; +#[cfg(feature = "ed-on-bls12-377")] +pub mod ed_on_bls12_377; +#[cfg(feature = "ed-on-bls12-381-bandersnatch")] +pub mod ed_on_bls12_381_bandersnatch; + +#[cfg(any( + feature = "bls12-377", + feature = "bls12-381", + feature = "bw6-761", + feature = "ed-on-bls12-377", + feature = "ed-on-bls12-381-bandersnatch", +))] mod utils; - -use sp_runtime_interface::runtime_interface; -use sp_std::vec::Vec; -use utils::*; - -/// Interfaces for working with *Arkworks* elliptic curves related types from within the runtime. -/// -/// All types are (de-)serialized through the wrapper types from the `ark-scale` trait, -/// with `ark_scale::{ArkScale, ArkScaleProjective}`. -/// -/// `ArkScale`'s `Usage` generic parameter is expected to be set to `HOST_CALL`, which is -/// a shortcut for "not-validated" and "not-compressed". -#[runtime_interface] -pub trait EllipticCurves { - /// Pairing multi Miller loop for BLS12-377. - /// - /// - Receives encoded: - /// - `a: ArkScale>>`. - /// - `b: ArkScale>>`. - /// - Returns encoded: ArkScale>>. - fn bls12_377_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - multi_miller_loop::(a, b) - } - - /// Pairing final exponentiation for BLS12-377. - /// - /// - Receives encoded: `ArkScale>>`. - /// - Returns encoded: `ArkScale>>`. - fn bls12_377_final_exponentiation(f: Vec) -> Result, ()> { - final_exponentiation::(f) - } - - /// Projective multiplication on G1 for BLS12-377. - /// - /// - Receives encoded: - /// - `base`: `ArkScaleProjective`. - /// - `scalar`: `ArkScale<&[u64]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_377_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_sw::(base, scalar) - } - - /// Projective multiplication on G2 for BLS12-377. - /// - /// - Receives encoded: - /// - `base`: `ArkScaleProjective`. - /// - `scalar`: `ArkScale<&[u64]>`. - /// - Returns encoded: `ArkScaleProjective`. 
- fn bls12_377_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_sw::(base, scalar) - } - - /// Multi scalar multiplication on G1 for BLS12-377. - /// - /// - Receives encoded: - /// - `bases`: `ArkScale<&[ark_bls12_377::G1Affine]>`. - /// - `scalars`: `ArkScale<&[ark_bls12_377::Fr]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_377_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw::(bases, scalars) - } - - /// Multi scalar multiplication on G2 for BLS12-377. - /// - /// - Receives encoded: - /// - `bases`: `ArkScale<&[ark_bls12_377::G2Affine]>`. - /// - `scalars`: `ArkScale<&[ark_bls12_377::Fr]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_377_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw::(bases, scalars) - } - - /// Pairing multi Miller loop for BLS12-381. - /// - /// - Receives encoded: - /// - `a`: `ArkScale>>`. - /// - `b`: `ArkScale>>`. - /// - Returns encoded: ArkScale>> - fn bls12_381_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - multi_miller_loop::(a, b) - } - - /// Pairing final exponentiation for BLS12-381. - /// - /// - Receives encoded: `ArkScale>>`. - /// - Returns encoded: `ArkScale>>`. - fn bls12_381_final_exponentiation(f: Vec) -> Result, ()> { - final_exponentiation::(f) - } - - /// Projective multiplication on G1 for BLS12-381. - /// - /// - Receives encoded: - /// - `base`: `ArkScaleProjective`. - /// - `scalar`: `ArkScale<&[u64]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_381_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_sw::(base, scalar) - } - - /// Projective multiplication on G2 for BLS12-381. - /// - /// - Receives encoded: - /// - `base`: `ArkScaleProjective`. - /// - `scalar`: `ArkScale<&[u64]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_381_mul_projective_g2(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_sw::(base, scalar) - } - - /// Multi scalar multiplication on G1 for BLS12-381. - /// - /// - Receives encoded: - /// - bases: `ArkScale<&[ark_bls12_381::G1Affine]>`. - /// - scalars: `ArkScale<&[ark_bls12_381::Fr]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_381_msm_g1(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw::(bases, scalars) - } - - /// Multi scalar multiplication on G2 for BLS12-381. - /// - /// - Receives encoded: - /// - `bases`: `ArkScale<&[ark_bls12_381::G2Affine]>`. - /// - `scalars`: `ArkScale<&[ark_bls12_381::Fr]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bls12_381_msm_g2(bases: Vec, scalars: Vec) -> Result, ()> { - msm_sw::(bases, scalars) - } - - /// Pairing multi Miller loop for BW6-761. - /// - /// - Receives encoded: - /// - `a`: `ArkScale>>`. - /// - `b`: `ArkScale>>`. - /// - Returns encoded: `ArkScale>>`. - fn bw6_761_multi_miller_loop(a: Vec, b: Vec) -> Result, ()> { - multi_miller_loop::(a, b) - } - - /// Pairing final exponentiation for BW6-761. - /// - /// - Receives encoded: `ArkScale>>`. - /// - Returns encoded: `ArkScale>>`. - fn bw6_761_final_exponentiation(f: Vec) -> Result, ()> { - final_exponentiation::(f) - } - - /// Projective multiplication on G1 for BW6-761. - /// - /// - Receives encoded: - /// - `base`: `ArkScaleProjective`. - /// - `scalar`: `ArkScale<&[u64]>`. - /// - Returns encoded: `ArkScaleProjective`. - fn bw6_761_mul_projective_g1(base: Vec, scalar: Vec) -> Result, ()> { - mul_projective_sw::(base, scalar) - } - - /// Projective multiplication on G2 for BW6-761. 
-	///
-	/// - Receives encoded:
-	///   - `base`: `ArkScaleProjective<ark_bw6_761::G2Projective>`.
-	///   - `scalar`: `ArkScale<&[u64]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_bw6_761::G2Projective>`.
-	fn bw6_761_mul_projective_g2(base: Vec<u8>, scalar: Vec<u8>) -> Result<Vec<u8>, ()> {
-		mul_projective_sw::<ark_bw6_761::g2::Config>(base, scalar)
-	}
-
-	/// Multi scalar multiplication on G1 for BW6-761.
-	///
-	/// - Receives encoded:
-	///   - `bases`: `ArkScale<&[ark_bw6_761::G1Affine]>`.
-	///   - `scalars`: `ArkScale<&[ark_bw6_761::Fr]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_bw6_761::G1Projective>`.
-	fn bw6_761_msm_g1(bases: Vec<u8>, bigints: Vec<u8>) -> Result<Vec<u8>, ()> {
-		msm_sw::<ark_bw6_761::g1::Config>(bases, bigints)
-	}
-
-	/// Multi scalar multiplication on G2 for BW6-761.
-	///
-	/// - Receives encoded:
-	///   - `bases`: `ArkScale<&[ark_bw6_761::G2Affine]>`.
-	///   - `scalars`: `ArkScale<&[ark_bw6_761::Fr]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_bw6_761::G2Projective>`.
-	fn bw6_761_msm_g2(bases: Vec<u8>, bigints: Vec<u8>) -> Result<Vec<u8>, ()> {
-		msm_sw::<ark_bw6_761::g2::Config>(bases, bigints)
-	}
-
-	/// Twisted Edwards projective multiplication for Ed-on-BLS12-377.
-	///
-	/// - Receives encoded:
-	///   - `base`: `ArkScaleProjective<ark_ed_on_bls12_377::EdwardsProjective>`.
-	///   - `scalar`: `ArkScale<&[u64]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_ed_on_bls12_377::EdwardsProjective>`.
-	fn ed_on_bls12_377_mul_projective(base: Vec<u8>, scalar: Vec<u8>) -> Result<Vec<u8>, ()> {
-		mul_projective_te::<ark_ed_on_bls12_377::EdwardsConfig>(base, scalar)
-	}
-
-	/// Twisted Edwards multi scalar multiplication for Ed-on-BLS12-377.
-	///
-	/// - Receives encoded:
-	///   - `bases`: `ArkScale<&[ark_ed_on_bls12_377::EdwardsAffine]>`.
-	///   - `scalars`: `ArkScale<&[ark_ed_on_bls12_377::Fr]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_ed_on_bls12_377::EdwardsProjective>`.
-	fn ed_on_bls12_377_msm(bases: Vec<u8>, scalars: Vec<u8>) -> Result<Vec<u8>, ()> {
-		msm_te::<ark_ed_on_bls12_377::EdwardsConfig>(bases, scalars)
-	}
-
-	/// Short Weierstrass projective multiplication for Ed-on-BLS12-381-Bandersnatch.
-	///
-	/// - Receives encoded:
-	///   - `base`: `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::SWProjective>`.
-	///   - `scalar`: `ArkScale<&[u64]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::SWProjective>`.
-	fn ed_on_bls12_381_bandersnatch_sw_mul_projective(
-		base: Vec<u8>,
-		scalar: Vec<u8>,
-	) -> Result<Vec<u8>, ()> {
-		mul_projective_sw::<ark_ed_on_bls12_381_bandersnatch::SWConfig>(base, scalar)
-	}
-
-	/// Twisted Edwards projective multiplication for Ed-on-BLS12-381-Bandersnatch.
-	///
-	/// - Receives encoded:
-	///   - `base`: `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::EdwardsProjective>`.
-	///   - `scalar`: `ArkScale<&[u64]>`.
-	/// - Returns encoded:
-	///   `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::EdwardsProjective>`.
-	fn ed_on_bls12_381_bandersnatch_te_mul_projective(
-		base: Vec<u8>,
-		scalar: Vec<u8>,
-	) -> Result<Vec<u8>, ()> {
-		mul_projective_te::<ark_ed_on_bls12_381_bandersnatch::EdwardsConfig>(base, scalar)
-	}
-
-	/// Short Weierstrass multi scalar multiplication for Ed-on-BLS12-381-Bandersnatch.
-	///
-	/// - Receives encoded:
-	///   - `bases`: `ArkScale<&[ark_ed_on_bls12_381_bandersnatch::SWAffine]>`.
-	///   - `scalars`: `ArkScale<&[ark_ed_on_bls12_381_bandersnatch::Fr]>`.
-	/// - Returns encoded: `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::SWProjective>`.
-	fn ed_on_bls12_381_bandersnatch_sw_msm(
-		bases: Vec<u8>,
-		scalars: Vec<u8>,
-	) -> Result<Vec<u8>, ()> {
-		msm_sw::<ark_ed_on_bls12_381_bandersnatch::SWConfig>(bases, scalars)
-	}
-
-	/// Twisted Edwards multi scalar multiplication for Ed-on-BLS12-381-Bandersnatch.
-	///
-	/// - Receives encoded:
-	///   - `base`: `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::EdwardsProjective>`.
-	///   - `scalars`: `ArkScale<&[ark_ed_on_bls12_381_bandersnatch::Fr]>`.
-	/// - Returns encoded:
-	///   `ArkScaleProjective<ark_ed_on_bls12_381_bandersnatch::EdwardsProjective>`.
-	fn ed_on_bls12_381_bandersnatch_te_msm(
-		bases: Vec<u8>,
-		scalars: Vec<u8>,
-	) -> Result<Vec<u8>, ()> {
-		msm_te::<ark_ed_on_bls12_381_bandersnatch::EdwardsConfig>(bases, scalars)
-	}
-}
diff --git a/substrate/primitives/crypto/ec-utils/src/utils.rs b/substrate/primitives/crypto/ec-utils/src/utils.rs
index 063b8fac7ad3..d0dd8ed8131c 100644
--- a/substrate/primitives/crypto/ec-utils/src/utils.rs
+++ b/substrate/primitives/crypto/ec-utils/src/utils.rs
@@ -17,109 +17,100 @@
 
 //! Generic executions of the operations for *Arkworks* elliptic curves.
 
+// As not all functions are used by each elliptic curve and some elliptic
+// curve may be excluded by the build we resort to `#[allow(unused)]` to
+// suppress the expected warning.
+
 use ark_ec::{
-	pairing::{MillerLoopOutput, Pairing, PairingOutput},
-	short_weierstrass,
-	short_weierstrass::SWCurveConfig,
-	twisted_edwards,
-	twisted_edwards::TECurveConfig,
+	pairing::{MillerLoopOutput, Pairing},
+	short_weierstrass::{Affine as SWAffine, Projective as SWProjective, SWCurveConfig},
+	twisted_edwards::{Affine as TEAffine, Projective as TEProjective, TECurveConfig},
 	CurveConfig, VariableBaseMSM,
 };
 use ark_scale::{
-	hazmat::ArkScaleProjective,
+	ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate},
 	scale::{Decode, Encode},
 };
 use sp_std::vec::Vec;
 
-// Scale codec type which is expected to be used by the host functions.
-//
-// Encoding is set to `HOST_CALL` which is a shortcut for "not-validated" and "not-compressed".
-type ArkScale<T> = ark_scale::ArkScale<T, { ark_scale::HOST_CALL }>;
-
-pub fn multi_miller_loop<Curve: Pairing>(g1: Vec<u8>, g2: Vec<u8>) -> Result<Vec<u8>, ()> {
-	let g1 = <ArkScale<Vec<<Curve as Pairing>::G1Affine>> as Decode>::decode(&mut g1.as_slice())
-		.map_err(|_| ())?;
-	let g2 = <ArkScale<Vec<<Curve as Pairing>::G2Affine>> as Decode>::decode(&mut g2.as_slice())
-		.map_err(|_| ())?;
+// SCALE encoding parameters shared by all the enabled modules
+const SCALE_USAGE: u8 = ark_scale::make_usage(Compress::No, Validate::No);
+type ArkScale<T> = ark_scale::ArkScale<T, SCALE_USAGE>;
+type ArkScaleProjective<T> = ark_scale::hazmat::ArkScaleProjective<T>;
 
-	let result = Curve::multi_miller_loop(g1.0, g2.0).0;
-
-	let result: ArkScale<<Curve as Pairing>::TargetField> = result.into();
-	Ok(result.encode())
+#[inline(always)]
+pub fn encode<T: CanonicalSerialize>(val: T) -> Vec<u8> {
+	ArkScale::from(val).encode()
 }
 
-pub fn final_exponentiation<Curve: Pairing>(target: Vec<u8>) -> Result<Vec<u8>, ()> {
-	let target =
-		<ArkScale<<Curve as Pairing>::TargetField> as Decode>::decode(&mut target.as_slice())
-			.map_err(|_| ())?;
-
-	let result = Curve::final_exponentiation(MillerLoopOutput(target.0)).ok_or(())?;
-
-	let result: ArkScale<PairingOutput<Curve>> = result.into();
-	Ok(result.encode())
+#[inline(always)]
+pub fn decode<T: CanonicalDeserialize>(buf: Vec<u8>) -> Result<T, ()> {
+	ArkScale::<T>::decode(&mut &buf[..]).map_err(|_| ()).map(|v| v.0)
 }
 
-pub fn msm_sw<Curve: SWCurveConfig>(bases: Vec<u8>, scalars: Vec<u8>) -> Result<Vec<u8>, ()> {
-	let bases =
-		<ArkScale<Vec<short_weierstrass::Affine<Curve>>> as Decode>::decode(&mut bases.as_slice())
-			.map_err(|_| ())?;
-	let scalars = <ArkScale<Vec<<Curve as CurveConfig>::ScalarField>> as Decode>::decode(
-		&mut scalars.as_slice(),
-	)
-	.map_err(|_| ())?;
-
-	let result =
-		<short_weierstrass::Projective<Curve> as VariableBaseMSM>::msm(&bases.0, &scalars.0)
-			.map_err(|_| ())?;
-
-	let result: ArkScaleProjective<short_weierstrass::Projective<Curve>> = result.into();
-	Ok(result.encode())
+#[inline(always)]
+pub fn encode_proj_sw<T: SWCurveConfig>(val: &SWProjective<T>) -> Vec<u8> {
+	ArkScaleProjective::from(val).encode()
 }
 
-pub fn msm_te<Curve: TECurveConfig>(bases: Vec<u8>, scalars: Vec<u8>) -> Result<Vec<u8>, ()> {
-	let bases =
-		<ArkScale<Vec<twisted_edwards::Affine<Curve>>> as Decode>::decode(&mut bases.as_slice())
-			.map_err(|_| ())?;
-	let scalars = <ArkScale<Vec<<Curve as CurveConfig>::ScalarField>> as Decode>::decode(
-		&mut scalars.as_slice(),
-	)
-	.map_err(|_| ())?;
+#[inline(always)]
+pub fn decode_proj_sw<T: SWCurveConfig>(buf: Vec<u8>) -> Result<SWProjective<T>, ()> {
+	ArkScaleProjective::decode(&mut &buf[..]).map_err(|_| ()).map(|v| v.0)
+}
 
-	let result = <twisted_edwards::Projective<Curve> as VariableBaseMSM>::msm(&bases.0, &scalars.0)
-		.map_err(|_| ())?;
+#[inline(always)]
+pub fn encode_proj_te<T: TECurveConfig>(val: &TEProjective<T>) -> Vec<u8> {
+	ArkScaleProjective::from(val).encode()
+}
 
-	let result: ArkScaleProjective<twisted_edwards::Projective<Curve>> = result.into();
-	Ok(result.encode())
+#[inline(always)]
+pub fn decode_proj_te<T: TECurveConfig>(buf: Vec<u8>) -> Result<TEProjective<T>, ()> {
+	ArkScaleProjective::decode(&mut &buf[..]).map_err(|_| ()).map(|v| v.0)
 }
 
-pub fn mul_projective_sw<Group: SWCurveConfig>(
-	base: Vec<u8>,
-	scalar: Vec<u8>,
-) -> Result<Vec<u8>, ()> {
-	let base = <ArkScaleProjective<short_weierstrass::Projective<Group>> as Decode>::decode(
-		&mut base.as_slice(),
-	)
-	.map_err(|_| ())?;
-	let scalar = <ArkScale<Vec<u64>> as Decode>::decode(&mut scalar.as_slice()).map_err(|_| ())?;
+#[allow(unused)]
+pub fn multi_miller_loop<T: Pairing>(g1: Vec<u8>, g2: Vec<u8>) -> Result<Vec<u8>, ()> {
+	let g1 = decode::<Vec<<T as Pairing>::G1Affine>>(g1)?;
+	let g2 = decode::<Vec<<T as Pairing>::G2Affine>>(g2)?;
+	let res = T::multi_miller_loop(g1, g2);
+	Ok(encode(res.0))
+}
 
-	let result = <Group as SWCurveConfig>::mul_projective(&base.0, &scalar.0);
+#[allow(unused)]
+pub fn final_exponentiation<T: Pairing>(target: Vec<u8>) -> Result<Vec<u8>, ()> {
+	let target = decode::<<T as Pairing>::TargetField>(target)?;
+	let res = T::final_exponentiation(MillerLoopOutput(target)).ok_or(())?;
+	Ok(encode(res.0))
+}
 
-	let result: ArkScaleProjective<short_weierstrass::Projective<Group>> = result.into();
-	Ok(result.encode())
+#[allow(unused)]
+pub fn msm_sw<T: SWCurveConfig>(bases: Vec<u8>, scalars: Vec<u8>) -> Result<Vec<u8>, ()> {
+	let bases = decode::<Vec<SWAffine<T>>>(bases)?;
+	let scalars = decode::<Vec<<T as CurveConfig>::ScalarField>>(scalars)?;
+	let res = <SWProjective<T> as VariableBaseMSM>::msm(&bases, &scalars).map_err(|_| ())?;
+	Ok(encode_proj_sw(&res))
 }
 
-pub fn mul_projective_te<Group: TECurveConfig>(
-	base: Vec<u8>,
-	scalar: Vec<u8>,
-) -> Result<Vec<u8>, ()> {
-	let base = <ArkScaleProjective<twisted_edwards::Projective<Group>> as Decode>::decode(
-		&mut base.as_slice(),
-	)
-	.map_err(|_| ())?;
-	let scalar = <ArkScale<Vec<u64>> as Decode>::decode(&mut scalar.as_slice()).map_err(|_| ())?;
+#[allow(unused)]
+pub fn msm_te<T: TECurveConfig>(bases: Vec<u8>, scalars: Vec<u8>) -> Result<Vec<u8>, ()> {
+	let bases = decode::<Vec<TEAffine<T>>>(bases)?;
+	let scalars = decode::<Vec<<T as CurveConfig>::ScalarField>>(scalars)?;
+	let res = <TEProjective<T> as VariableBaseMSM>::msm(&bases, &scalars).map_err(|_| ())?;
+	Ok(encode_proj_te(&res))
}
 
-	let result = <Group as TECurveConfig>::mul_projective(&base.0, &scalar.0);
+#[allow(unused)]
+pub fn mul_projective_sw<T: SWCurveConfig>(base: Vec<u8>, scalar: Vec<u8>) -> Result<Vec<u8>, ()> {
+	let base = decode_proj_sw::<T>(base)?;
+	let scalar = decode::<Vec<u64>>(scalar)?;
+	let res = <T as SWCurveConfig>::mul_projective(&base, &scalar);
+	Ok(encode_proj_sw(&res))
+}
 
-	let result: ArkScaleProjective<twisted_edwards::Projective<Group>> = result.into();
-	Ok(result.encode())
+#[allow(unused)]
+pub fn mul_projective_te<T: TECurveConfig>(base: Vec<u8>, scalar: Vec<u8>) -> Result<Vec<u8>, ()> {
+	let base = decode_proj_te::<T>(base)?;
+	let scalar = decode::<Vec<u64>>(scalar)?;
+	let res = <T as TECurveConfig>::mul_projective(&base, &scalar);
+	Ok(encode_proj_te(&res))
 }
From f1bfc08038252caf7f1225f668b4518a6149bb24 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 31 Oct 2023 16:06:07 +0200
Subject: [PATCH 39/69] xcmp-queue: remove outdated bridging comment (#2095)

Removed confusing and outdated `TODO`.

---
 cumulus/pallets/xcmp-queue/src/bridging.rs | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/cumulus/pallets/xcmp-queue/src/bridging.rs b/cumulus/pallets/xcmp-queue/src/bridging.rs
index 0fc3f1f39ea3..78ccd4b62699 100644
--- a/cumulus/pallets/xcmp-queue/src/bridging.rs
+++ b/cumulus/pallets/xcmp-queue/src/bridging.rs
@@ -63,13 +63,6 @@ impl<SiblingBridgeHubParaId: Get<ParaId>, Runtime: crate::Config>
 			return true
 		}
 
-		// TODO: https://github.com/paritytech/polkadot-sdk/pull/1556 - once this PR is merged, we may
-		// remove the following code.
-		// TODO: the following restriction is arguable, we may live without that, assuming that
-		// There can't be more than some `N` messages queued at the bridge queue (at the source BH)
-		// AND before accepting next (or next-after-next) delivery transaction, we'll receive the
-		// suspension signal from the target parachain and stop accepting delivery transactions.
-
 		// It takes some time for target parachain to suspend inbound channel with the target BH and
 		// during that we will keep accepting new message delivery transactions. Let's also reject
 		// new deliveries if there are too many "pages" (concatenated XCM messages) in the target BH
From 0f76530bfb21ad30452f6c60a33637a72a9bbd08 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 31 Oct 2023 16:20:31 +0200
Subject: [PATCH 40/69] Update polkadot/xcm/xcm-builder/src/barriers.rs

Co-authored-by: Branislav Kontur
---
 polkadot/xcm/xcm-builder/src/barriers.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs
index 50098791b443..3b13cab2c1ea 100644
--- a/polkadot/xcm/xcm-builder/src/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/barriers.rs
@@ -88,8 +88,6 @@ impl<T: Contains<MultiLocation>> ShouldExecute for AllowTopLevelPaidExecutionFro
 				_ => Err(ProcessMessageError::BadFormat),
 			})?
 			.skip_inst_while(|inst| matches!(inst, ClearOrigin))?
-			// allow setting fees mode to jit or not for use in following `BuyExecution`
-			.skip_inst_while(|inst| matches!(inst, SetFeesMode { .. }))?
 			.match_next_inst(|inst| match inst {
 				BuyExecution { weight_limit: Limited(ref mut weight), .. }
 					if weight.all_gte(max_weight) =>
From 9d3f2dda31bd3e75044e8a5932f2c6c1eafa142b Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 31 Oct 2023 16:21:08 +0200
Subject: [PATCH 41/69] Update polkadot/xcm/pallet-xcm/src/lib.rs

Co-authored-by: Branislav Kontur
---
 polkadot/xcm/pallet-xcm/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index fcd51fd7b0f2..0d146dff9e44 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -1219,7 +1219,7 @@ impl<T: Config> Pallet<T> {
 			ensure!(!x.is_zero(), Error::<T>::Empty);
 		}
 		let transfer_type =
-			<T::XcmExecutor as XcmAssetTransfers>::determine_for(&asset, dest)
+			T::XcmExecutor::determine_for(&asset, dest)
 				.map_err(Error::<T>::from)?;
 		// Ensure asset is not teleportable to `dest`.
 		ensure!(transfer_type != TransferType::Teleport, Error::<T>::Filtered);
From 9feb4b0103819eaac668cafde11b4840ecba0c7f Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 31 Oct 2023 16:21:23 +0200
Subject: [PATCH 42/69] Update polkadot/xcm/pallet-xcm/src/lib.rs

Co-authored-by: Branislav Kontur
---
 polkadot/xcm/pallet-xcm/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 0d146dff9e44..f57a4720282b 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -1259,7 +1259,7 @@ impl<T: Config> Pallet<T> {
 		}
 		let fees = assets.swap_remove(fee_asset_item as usize);
 		let fees_transfer_type =
-			<T::XcmExecutor as XcmAssetTransfers>::determine_for(&fees, &dest)
+			T::XcmExecutor::determine_for(&fees, &dest)
 				.map_err(Error::<T>::from)?;
 		let assets_transfer_type = if assets.is_empty() {
 			// Single asset to transfer (one used for fees where transfer type is determined above).
From 0a38b70d17fe5fb059e847381280769624372073 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 31 Oct 2023 16:21:39 +0200
Subject: [PATCH 43/69] Update polkadot/xcm/pallet-xcm/src/lib.rs

Co-authored-by: Branislav Kontur
---
 polkadot/xcm/pallet-xcm/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index f57a4720282b..3cc2d600d62c 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -1332,7 +1332,7 @@ impl<T: Config> Pallet<T> {
 		let (origin_location, assets) = value;
 		for asset in assets.iter() {
 			let transfer_type =
-				<T::XcmExecutor as XcmAssetTransfers>::determine_for(asset, &dest)
+				T::XcmExecutor::determine_for(asset, &dest)
 					.map_err(Error::<T>::from)?;
 			ensure!(matches!(transfer_type, TransferType::Teleport), Error::<T>::Filtered);
 		}
From 02447d00d928b27bd94022fa87eac09ec1978af2 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 31 Oct 2023 16:22:56 +0200
Subject: [PATCH 44/69] fmt

---
 polkadot/xcm/pallet-xcm/src/lib.rs | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 3cc2d600d62c..3af0a48b2440 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -1219,8 +1219,7 @@ impl<T: Config> Pallet<T> {
 			ensure!(!x.is_zero(), Error::<T>::Empty);
 		}
 		let transfer_type =
-			T::XcmExecutor::determine_for(&asset, dest)
-				.map_err(Error::<T>::from)?;
+			T::XcmExecutor::determine_for(&asset, dest).map_err(Error::<T>::from)?;
 		// Ensure asset is not teleportable to `dest`.
 		ensure!(transfer_type != TransferType::Teleport, Error::<T>::Filtered);
 		if let Some(reserve) = reserve.as_ref() {
@@ -1259,8 +1258,7 @@ impl<T: Config> Pallet<T> {
 		}
 		let fees = assets.swap_remove(fee_asset_item as usize);
 		let fees_transfer_type =
-			T::XcmExecutor::determine_for(&fees, &dest)
-				.map_err(Error::<T>::from)?;
+			T::XcmExecutor::determine_for(&fees, &dest).map_err(Error::<T>::from)?;
 		let assets_transfer_type = if assets.is_empty() {
 			// Single asset to transfer (one used for fees where transfer type is determined above).
 			ensure!(fees_transfer_type != TransferType::Teleport, Error::<T>::Filtered);
@@ -1332,8 +1330,7 @@ impl<T: Config> Pallet<T> {
 		let (origin_location, assets) = value;
 		for asset in assets.iter() {
 			let transfer_type =
-				T::XcmExecutor::determine_for(asset, &dest)
-					.map_err(Error::<T>::from)?;
+				T::XcmExecutor::determine_for(asset, &dest).map_err(Error::<T>::from)?;
 			ensure!(matches!(transfer_type, TransferType::Teleport), Error::<T>::Filtered);
 		}
 		let fees = assets.get(fee_asset_item as usize).ok_or(Error::<T>::Empty)?.clone();
From ada1ac3dcdfdf0cac66b0c8b6ecc12de9e8657c5 Mon Sep 17 00:00:00 2001
From: Javier Viola
Date: Tue, 31 Oct 2023 13:33:01 -0300
Subject: [PATCH 45/69] [DNM] bump zombienet version (#2083)

This version includes:
- Move `spot` usage in CI to 50%
- Fix `PodMonitor`; metrics will be relayed to Grafana

---
 .gitlab-ci.yml                                                 | 2 +-
 polkadot/zombienet_tests/functional/0001-parachains-pvf.toml   | 3 +--
 .../zombienet_tests/functional/0002-parachains-disputes.toml   | 1 -
 .../functional/0004-parachains-garbage-candidate.toml          | 1 -
 polkadot/zombienet_tests/misc/0001-paritydb.toml               | 1 -
 5 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 069068369aba..835b668de259 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -30,7 +30,7 @@ variables:
   RUSTY_CACHIER_COMPRESSION_METHOD: zstd
   NEXTEST_FAILURE_OUTPUT: immediate-final
   NEXTEST_SUCCESS_OUTPUT: final
-  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.71"
+  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.75"
   DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}"
 
 default:
diff --git a/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml b/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml
index 9ae4d899e690..53205867fd12 100644
--- a/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml
+++ b/polkadot/zombienet_tests/functional/0001-parachains-pvf.toml
@@ -4,7 +4,6 @@ timeout = 1000
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
-chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode"
 
 [relaychain.default_resources]
 limits = { memory = "4G", cpu = "2" }
@@ -133,4 +132,4 @@ genesis_state_generator = "undying-collator export-genesis-state --pov-size=1000
 [types.Header]
 number = "u64"
 parent_hash = "Hash"
-post_state = "Hash"
\ No newline at end of file
+post_state = "Hash"
diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml
index e6aeb8e245c2..c63d3e021708 100644
--- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml
+++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml
@@ -8,7 +8,6 @@ timeout = 1000
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
-chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode"
 default_command = "polkadot"
 
 [relaychain.default_resources]
diff --git a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml
index ef27d7b92f02..94eb6bb5bb1d 100644
--- a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml
+++ b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml
@@ -9,7 +9,6 @@ bootnode = true
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
-chain_spec_command = "polkadot build-spec --chain rococo-local --disable-default-bootnode"
 default_command = "polkadot"
 
 [relaychain.default_resources]
diff --git a/polkadot/zombienet_tests/misc/0001-paritydb.toml b/polkadot/zombienet_tests/misc/0001-paritydb.toml
index 99dc9c66e26e..e2130963f0ec 100644
--- a/polkadot/zombienet_tests/misc/0001-paritydb.toml
+++ b/polkadot/zombienet_tests/misc/0001-paritydb.toml
@@ -9,7 +9,6 @@ bootnode = true
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
-chain_spec_command = "polkadot build-spec --chain rococo-local"
 default_command = "polkadot"
 
 [relaychain.default_resources]
From 64f4b15640f3ee0f2e5d00d8603c8d9771f7ce61 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Tue, 31 Oct 2023 18:40:12 +0200
Subject: [PATCH 46/69] polkadot: parachains: Fix v9 host configuration migration (#2103)

We shouldn't override fields that were added in the previous version (v8) with their default values, because we would lose whatever values have already been set.

Note: v8 & v9 seem to have landed at the same time on Rococo, and they will probably land at the same time on Westend and other chains, so functionally this makes little difference, but let's have it fixed for people that copy-paste :D, like me.

---------

Signed-off-by: Alexandru Gheorghe
---
 .../parachains/src/configuration/migration/v9.rs | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs
index 64d71e628f4e..e37f0b9b0e3d 100644
--- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs
+++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs
@@ -24,7 +24,6 @@ use frame_support::{
 };
 use frame_system::pallet_prelude::BlockNumberFor;
 use primitives::{SessionIndex, LEGACY_MIN_BACKING_VOTES};
-use sp_runtime::Perbill;
 use sp_std::vec::Vec;
 
 use frame_support::traits::OnRuntimeUpgrade;
@@ -145,11 +144,11 @@
 pvf_voting_ttl                      : pre.pvf_voting_ttl,
 minimum_validation_upgrade_delay    : pre.minimum_validation_upgrade_delay,
 async_backing_params                : pre.async_backing_params,
 executor_params                     : pre.executor_params,
-on_demand_queue_max_size            : 10_000u32,
-on_demand_base_fee                  : 10_000_000u128,
-on_demand_fee_variability           : Perbill::from_percent(3),
-on_demand_target_queue_utilization  : Perbill::from_percent(25),
-on_demand_ttl                       : 5u32.into(),
+on_demand_queue_max_size            : pre.on_demand_queue_max_size,
+on_demand_base_fee                  : pre.on_demand_base_fee,
+on_demand_fee_variability           : pre.on_demand_fee_variability,
+on_demand_target_queue_utilization  : pre.on_demand_target_queue_utilization,
+on_demand_ttl                       : pre.on_demand_ttl,
 minimum_backing_votes               : LEGACY_MIN_BACKING_VOTES
 }
 };
From 6e2f94f81c79665cc573ab738f209780d61e9bf0 Mon Sep 17 00:00:00 2001
From: Adel Arja
Date: Tue, 31 Oct 2023 14:35:19 -0300
Subject: [PATCH 47/69] 1953 defensive testing extrinsic (#1998)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

The `trigger_defensive` call has been added to the `root-testing` pallet. The idea is to have this pallet running on `Rococo/Westend` and use it to verify whether the runtime monitoring works end-to-end. To accomplish this, `trigger_defensive` dispatches an event when it is called.
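To make the intended behaviour concrete, here is a minimal sketch of how the new call is expected to behave in a mock runtime. This is an illustration only: `new_test_ext`, `Test`, `RuntimeOrigin` and the pallet instance name `RootTesting` are assumed to come from a mock setup like the one in `pallet-utility`'s tests, and are not part of this PR:

```rust
use frame_support::{assert_noop, assert_ok};
use sp_runtime::DispatchError;

#[test]
fn trigger_defensive_is_root_only() {
	new_test_ext().execute_with(|| {
		// `ensure_root` rejects any signed origin before anything else happens.
		assert_noop!(
			RootTesting::trigger_defensive(RuntimeOrigin::signed(1)),
			DispatchError::BadOrigin
		);
	});
}

// NOTE: `defensive!` also fires a `debug_assert!` when `debug_assertions` are
// enabled, so the happy path is easiest to observe in a release-mode build,
// where the call only logs the error line and deposits the event:
#[cfg(not(debug_assertions))]
#[test]
fn trigger_defensive_deposits_event() {
	new_test_ext().execute_with(|| {
		assert_ok!(RootTesting::trigger_defensive(RuntimeOrigin::root()));
		System::assert_last_event(pallet_root_testing::Event::<Test>::DefensiveTestCall.into());
	});
}
```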
Closes #1953

---------

Signed-off-by: Oliver Tale-Yazdi
Co-authored-by: Oliver Tale-Yazdi
---
 Cargo.lock                                 |  2 ++
 polkadot/runtime/rococo/Cargo.toml         |  3 +++
 polkadot/runtime/rococo/src/lib.rs         |  7 +++++++
 polkadot/runtime/westend/Cargo.toml        |  3 +++
 polkadot/runtime/westend/src/lib.rs        |  7 +++++++
 substrate/bin/node/runtime/src/lib.rs      |  4 +++-
 substrate/frame/root-testing/src/lib.rs    | 24 +++++++++++++++++++---
 substrate/frame/support/src/traits/misc.rs |  6 +++---
 substrate/frame/utility/src/tests.rs       |  6 ++++--
 9 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 8cedf9f2c742..a292f0cb1ab3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14350,6 +14350,7 @@ dependencies = [
  "pallet-ranked-collective",
  "pallet-recovery",
  "pallet-referenda",
+ "pallet-root-testing",
  "pallet-scheduler",
  "pallet-session",
  "pallet-society",
@@ -20370,6 +20371,7 @@ dependencies = [
  "pallet-proxy",
  "pallet-recovery",
  "pallet-referenda",
+ "pallet-root-testing",
  "pallet-scheduler",
  "pallet-session",
  "pallet-session-benchmarking",
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index 243f4337cae1..5b55d3fec503 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -86,6 +86,7 @@ pallet-vesting = { path = "../../../substrate/frame/vesting", default-features =
 pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false }
 pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false }
 pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true }
+pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false }
 
 frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true }
 frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true }
@@ -161,6 +162,7 @@ std = [
 	"pallet-ranked-collective/std",
 	"pallet-recovery/std",
 	"pallet-referenda/std",
+	"pallet-root-testing/std",
 	"pallet-scheduler/std",
 	"pallet-session/std",
 	"pallet-society/std",
@@ -290,6 +292,7 @@ try-runtime = [
 	"pallet-ranked-collective/try-runtime",
 	"pallet-recovery/try-runtime",
 	"pallet-referenda/try-runtime",
+	"pallet-root-testing/try-runtime",
 	"pallet-scheduler/try-runtime",
 	"pallet-session/try-runtime",
 	"pallet-society/try-runtime",
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index e6ad061ce069..b127eda3ba98 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1230,6 +1230,10 @@ impl pallet_sudo::Config for Runtime {
 	type WeightInfo = weights::pallet_sudo::WeightInfo<Runtime>;
 }
 
+impl pallet_root_testing::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+}
+
 impl pallet_asset_rate::Config for Runtime {
 	type WeightInfo = weights::pallet_asset_rate::WeightInfo<Runtime>;
 	type RuntimeEvent = RuntimeEvent;
@@ -1368,6 +1372,9 @@ construct_runtime! {
 		// State trie migration pallet, only temporary.
 		StateTrieMigration: pallet_state_trie_migration = 254,
 
+		// Root testing pallet.
+		RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event<T>} = 249,
+
 		// Sudo.
 		Sudo: pallet_sudo::{Pallet, Call, Storage, Event<T>, Config<T>} = 255,
 	}
diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml
index cb1118cf92fb..4148d6105917 100644
--- a/polkadot/runtime/westend/Cargo.toml
+++ b/polkadot/runtime/westend/Cargo.toml
@@ -92,6 +92,7 @@ pallet-vesting = { path = "../../../substrate/frame/vesting", default-features =
 pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false }
 pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false }
 pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true }
+pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false }
 
 frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true }
 frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true }
@@ -177,6 +178,7 @@ std = [
 	"pallet-proxy/std",
 	"pallet-recovery/std",
 	"pallet-referenda/std",
+	"pallet-root-testing/std",
 	"pallet-scheduler/std",
 	"pallet-session-benchmarking?/std",
 	"pallet-session/std",
@@ -315,6 +317,7 @@ try-runtime = [
 	"pallet-proxy/try-runtime",
 	"pallet-recovery/try-runtime",
 	"pallet-referenda/try-runtime",
+	"pallet-root-testing/try-runtime",
 	"pallet-scheduler/try-runtime",
 	"pallet-session/try-runtime",
 	"pallet-society/try-runtime",
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 9ee4f3cf23e5..b8b2e540e96b 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -1349,6 +1349,10 @@ impl pallet_nomination_pools::Config for Runtime {
 	type MaxPointsToBalance = MaxPointsToBalance;
 }
 
+impl pallet_root_testing::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+}
+
 parameter_types! {
 	// The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high)
 	pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS;
@@ -1483,6 +1487,9 @@ construct_runtime! {
 
 		// Asset rate.
 		AssetRate: pallet_asset_rate::{Pallet, Call, Storage, Event<T>} = 101,
+
+		// Root testing pallet.
+		RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event<T>} = 102,
 	}
 }
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index f3c248976325..cb8d7f6b1de6 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -1014,7 +1014,9 @@ impl pallet_remark::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 }
 
-impl pallet_root_testing::Config for Runtime {}
+impl pallet_root_testing::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+}
 
 parameter_types! {
 	pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES;
diff --git a/substrate/frame/root-testing/src/lib.rs b/substrate/frame/root-testing/src/lib.rs
index bbcda09c3065..51fd835409ae 100644
--- a/substrate/frame/root-testing/src/lib.rs
+++ b/substrate/frame/root-testing/src/lib.rs
@@ -24,8 +24,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use frame_support::dispatch::DispatchResult;
-use sp_runtime::Perbill;
+use frame_support::{dispatch::DispatchResult, sp_runtime::Perbill};
 
 pub use pallet::*;
 
@@ -36,11 +35,21 @@ pub mod pallet {
 	use frame_system::pallet_prelude::*;
 
 	#[pallet::config]
-	pub trait Config: frame_system::Config {}
+	pub trait Config: frame_system::Config {
+		/// The overarching event type.
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+	}
 
 	#[pallet::pallet]
 	pub struct Pallet<T>(_);
 
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// Event dispatched when the trigger_defensive extrinsic is called.
+		DefensiveTestCall,
+	}
+
 	#[pallet::call]
 	impl<T: Config> Pallet<T> {
 		/// A dispatch that will fill the block weight up to the given ratio.
@@ -50,5 +59,14 @@ pub mod pallet {
 			ensure_root(origin)?;
 			Ok(())
 		}
+
+		#[pallet::call_index(1)]
+		#[pallet::weight(0)]
+		pub fn trigger_defensive(origin: OriginFor<T>) -> DispatchResult {
+			ensure_root(origin)?;
+			frame_support::defensive!("root_testing::trigger_defensive was called.");
+			Self::deposit_event(Event::DefensiveTestCall);
+			Ok(())
+		}
 	}
 }
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index 45a3bba9b3a6..78032cc0a940 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -55,7 +55,7 @@ impl VariantCount for () {
 macro_rules! defensive {
 	() => {
 		frame_support::__private::log::error!(
-			target: "runtime",
+			target: "runtime::defensive",
 			"{}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR
 		);
 	};
 	($error:expr $(,)?) => {
 		frame_support::__private::log::error!(
-			target: "runtime",
+			target: "runtime::defensive",
 			"{}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
 			$error
 		);
 	};
 	($error:expr, $proof:expr $(,)?) => {
 		frame_support::__private::log::error!(
-			target: "runtime",
+			target: "runtime::defensive",
 			"{}: {:?}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
 			$error,
diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs
index cbd495a5c152..01e3f5264bff 100644
--- a/substrate/frame/utility/src/tests.rs
+++ b/substrate/frame/utility/src/tests.rs
@@ -132,7 +132,7 @@ frame_support::construct_runtime!(
 		System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
 		Timestamp: pallet_timestamp::{Call, Inherent},
 		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
-		RootTesting: pallet_root_testing::{Pallet, Call, Storage},
+		RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event<T>},
 		Council: pallet_collective::<Instance1>,
 		Utility: utility::{Pallet, Call, Event},
 		Example: example::{Pallet, Call},
@@ -187,7 +187,9 @@ impl pallet_balances::Config for Test {
 	type MaxHolds = ();
 }
 
-impl pallet_root_testing::Config for Test {}
+impl pallet_root_testing::Config for Test {
+	type RuntimeEvent = RuntimeEvent;
+}
 
 impl pallet_timestamp::Config for Test {
 	type Moment = u64;
From 495d24d730bb9c2205a5305f383627eabda4a323 Mon Sep 17 00:00:00 2001
From: Lulu
Date: Tue, 31 Oct 2023 18:04:31 +0000
Subject: [PATCH 48/69] Add ci check for parity-publish and fix current check issues (#1887)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Sergejs Kostjucenko <85877331+sergejparity@users.noreply.github.com>
Co-authored-by: Bastian Köcher
---
 .github/workflows/check-publish.yml            |  28 ++
 .gitlab/pipeline/build.yml                     |   2 +-
 .gitlab/pipeline/test.yml                      |   4 +-
 Cargo.lock                                     | 388 +++++++++---------
 cumulus/client/cli/Cargo.toml                  |   1 +
 cumulus/client/collator/Cargo.toml             |   1 +
 cumulus/client/consensus/aura/Cargo.toml       |   1 +
 cumulus/client/consensus/common/Cargo.toml     |   1 +
 cumulus/client/consensus/proposer/Cargo.toml   |   1 +
 .../client/consensus/relay-chain/Cargo.toml    |   1 +
 cumulus/client/network/Cargo.toml              |   1 +
 cumulus/client/pov-recovery/Cargo.toml         |   1 +
 .../Cargo.toml                                 |   1 +
 .../client/relay-chain-interface/Cargo.toml    |   1 +
 .../relay-chain-minimal-node/Cargo.toml        |   1 +
 .../relay-chain-rpc-interface/Cargo.toml       |   1 +
 cumulus/client/service/Cargo.toml              |   1 +
 cumulus/pallets/aura-ext/Cargo.toml            |   1 +
 cumulus/pallets/dmp-queue/Cargo.toml           |   1 +
 cumulus/pallets/parachain-system/Cargo.toml    |   1 +
 .../parachain-system/proc-macro/Cargo.toml     |   1 +
 cumulus/pallets/solo-to-para/Cargo.toml        |   1 +
 cumulus/pallets/xcm/Cargo.toml                 |   1 +
 cumulus/pallets/xcmp-queue/Cargo.toml          |   1 +
 cumulus/pallets/xcmp-queue/src/bridging.rs     |   4 +-
 cumulus/pallets/xcmp-queue/src/tests.rs        |  22 +-
 cumulus/parachain-template/runtime/Cargo.toml  |   2 +-
 cumulus/parachains/common/Cargo.toml           |   3 +-
 .../pallets/collective-content/Cargo.toml      |   1 +
 .../pallets/parachain-info/Cargo.toml          |   3 +-
 cumulus/parachains/pallets/ping/Cargo.toml     |   1 +
 .../assets/asset-hub-kusama/Cargo.toml         |   3 +-
 .../assets/asset-hub-polkadot/Cargo.toml       |   3 +-
 .../assets/asset-hub-rococo/Cargo.toml         |   3 +-
 .../assets/asset-hub-westend/Cargo.toml        |   3 +-
 .../runtimes/assets/common/Cargo.toml          |   1 +
 .../runtimes/assets/test-utils/Cargo.toml      |   3 +-
 .../bridge-hubs/bridge-hub-kusama/Cargo.toml   |   3 +-
 .../bridge-hub-polkadot/Cargo.toml             |   3 +-
 .../bridge-hubs/bridge-hub-rococo/Cargo.toml   |   3 +-
 .../bridge-hubs/test-utils/Cargo.toml          |   3 +-
 .../collectives-polkadot/Cargo.toml            |   3 +-
 .../contracts/contracts-rococo/Cargo.toml      |   3 +-
 .../glutton/glutton-kusama/Cargo.toml          |   3 +-
 .../runtimes/starters/seedling/Cargo.toml      |   3 +-
.../runtimes/starters/shell/Cargo.toml | 3 +- .../parachains/runtimes/test-utils/Cargo.toml | 3 +- .../runtimes/testing/penpal/Cargo.toml | 2 +- .../testing/rococo-parachain/Cargo.toml | 4 +- cumulus/polkadot-parachain/Cargo.toml | 1 + cumulus/primitives/aura/Cargo.toml | 1 + cumulus/primitives/core/Cargo.toml | 1 + .../primitives/parachain-inherent/Cargo.toml | 1 + cumulus/primitives/timestamp/Cargo.toml | 1 + cumulus/primitives/utility/Cargo.toml | 1 + cumulus/test/relay-sproof-builder/Cargo.toml | 1 + cumulus/xcm/xcm-emulator/Cargo.toml | 1 + substrate/Cargo.toml | 3 +- substrate/bin/node/cli/Cargo.toml | 8 +- .../bin/node/cli/benches/block_production.rs | 1 + .../bin/node/cli/benches/transaction_pool.rs | 1 + substrate/bin/node/cli/bin/main.rs | 2 + substrate/bin/node/executor/Cargo.toml | 2 +- substrate/bin/node/executor/benches/bench.rs | 1 + substrate/bin/node/executor/tests/common.rs | 1 + substrate/bin/node/inspect/Cargo.toml | 3 +- substrate/bin/node/testing/Cargo.toml | 2 +- .../bin/utils/chain-spec-builder/Cargo.toml | 5 +- .../bin/utils/chain-spec-builder/bin/main.rs | 1 + substrate/frame/asset-rate/Cargo.toml | 1 - .../frame/examples/frame-crate/Cargo.toml | 1 - .../procedural/src/pallet/expand/warnings.rs | 8 +- substrate/scripts/ci/deny.toml | 2 +- substrate/test-utils/cli/Cargo.toml | 2 +- substrate/test-utils/cli/src/lib.rs | 2 +- 75 files changed, 334 insertions(+), 250 deletions(-) create mode 100644 .github/workflows/check-publish.yml diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml new file mode 100644 index 000000000000..9ab47dba51b1 --- /dev/null +++ b/.github/workflows/check-publish.yml @@ -0,0 +1,28 @@ +name: Check publish + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + +jobs: + check-publish: + strategy: + matrix: + os: ["ubuntu-latest"] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + + - name: Rust Cache + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish --profile dev + + - name: parity-publish check + run: parity-publish check --allow-unpublished diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index fefa3739a9ff..5c13045706c4 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -305,7 +305,7 @@ build-linux-substrate: # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" script: - - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release -p node-cli + - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release -p staging-node-cli - mv $CARGO_TARGET_DIR/release/substrate-node ./artifacts/substrate/substrate - echo -n "Substrate version = " - if [ "${CI_COMMIT_TAG}" ]; then diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 7d7007acd8a8..4ed3ec19c48a 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -352,7 +352,7 @@ quick-benchmarks: WASM_BUILD_NO_COLOR: 1 WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" script: - - time cargo run --locked --release -p node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - time cargo run --locked --release -p staging-node-cli 
--bin substrate-node --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 test-frame-examples-compile-to-wasm: # into one job @@ -396,7 +396,7 @@ test-linux-stable-int: script: - WASM_BUILD_NO_COLOR=1 RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace - time cargo test -p node-cli --release --locked -- --ignored + time cargo test -p staging-node-cli --release --locked -- --ignored # more information about this job can be found here: # https://github.com/paritytech/substrate/pull/6916 diff --git a/Cargo.lock b/Cargo.lock index a292f0cb1ab3..f2b9b9ec45d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -774,7 +774,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -797,6 +796,7 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -844,7 +844,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -866,6 +865,7 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -947,7 +947,6 @@ dependencies = [ "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub-router", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -970,6 +969,7 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -1052,7 +1052,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -1075,6 +1074,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -1102,7 +1102,6 @@ dependencies = [ "pallet-session", "pallet-xcm", "pallet-xcm-bridge-hub-router", - "parachain-info", "parachains-common", "parachains-runtimes-test-utils", "parity-scale-codec", @@ -1112,6 +1111,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -1994,7 +1994,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2017,6 +2016,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -2057,7 +2057,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2080,6 +2079,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -2160,7 +2160,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2185,6 +2184,7 @@ 
dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -2225,7 +2225,6 @@ dependencies = [ "pallet-utility", "pallet-xcm", "pallet-xcm-benchmarks", - "parachain-info", "parachains-common", "parachains-runtimes-test-utils", "parity-scale-codec", @@ -2234,6 +2233,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-tracing", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -2502,20 +2502,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "chain-spec-builder" -version = "2.0.0" -dependencies = [ - "ansi_term", - "clap 4.4.6", - "node-cli", - "rand 0.8.5", - "sc-chain-spec", - "sc-keystore", - "sp-core", - "sp-keystore", -] - [[package]] name = "chrono" version = "0.4.27" @@ -2772,7 +2758,6 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -2795,6 +2780,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -2983,7 +2969,6 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-core-primitives", @@ -3005,6 +2990,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -6011,7 +5997,6 @@ dependencies = [ "pallet-glutton", "pallet-sudo", "pallet-timestamp", - "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", @@ -6028,6 +6013,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -8468,151 +8454,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "node-cli" -version = "3.0.0-dev" -dependencies = [ - "array-bytes 6.1.0", - "assert_cmd", - "clap 4.4.6", - "clap_complete", - "criterion 0.4.0", - "frame-benchmarking-cli", - "frame-system", - "frame-system-rpc-runtime-api", - "futures", - "jsonrpsee", - "kitchensink-runtime", - "log", - "nix 0.26.2", - "node-executor", - "node-inspect", - "node-primitives", - "node-rpc", - "pallet-asset-conversion-tx-payment", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-balances", - "pallet-im-online", - "pallet-timestamp", - "parity-scale-codec", - "platforms", - "rand 0.8.5", - "regex", - "sc-authority-discovery", - "sc-basic-authorship", - "sc-block-builder", - "sc-chain-spec", - "sc-cli", - "sc-client-api", - "sc-client-db", - "sc-consensus", - "sc-consensus-babe", - "sc-consensus-epochs", - "sc-consensus-grandpa", - "sc-consensus-slots", - "sc-executor", - "sc-keystore", - "sc-mixnet", - "sc-network", - "sc-network-common", - "sc-network-statement", - "sc-network-sync", - "sc-offchain", - "sc-rpc", - "sc-service", - "sc-service-test", - "sc-statement-store", - "sc-storage-monitor", - "sc-sync-state-rpc", - "sc-sysinfo", - "sc-telemetry", - "sc-transaction-pool", - "sc-transaction-pool-api", - "serde", - "serde_json", - "soketto", - "sp-api", - "sp-authority-discovery", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-keystore", - "sp-mixnet", - "sp-runtime", - "sp-statement-store", - 
"sp-timestamp", - "sp-tracing", - "sp-transaction-storage-proof", - "substrate-build-script-utils", - "substrate-cli-test-utils", - "substrate-frame-cli", - "substrate-rpc-client", - "tempfile", - "tokio", - "tokio-util", - "try-runtime-cli", - "wait-timeout", -] - -[[package]] -name = "node-executor" -version = "3.0.0-dev" -dependencies = [ - "criterion 0.4.0", - "frame-benchmarking", - "frame-support", - "frame-system", - "futures", - "kitchensink-runtime", - "node-primitives", - "node-testing", - "pallet-balances", - "pallet-contracts", - "pallet-glutton", - "pallet-im-online", - "pallet-root-testing", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-treasury", - "parity-scale-codec", - "sc-executor", - "scale-info", - "sp-application-crypto", - "sp-consensus-babe", - "sp-core", - "sp-externalities", - "sp-keyring", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "sp-statement-store", - "sp-tracing", - "sp-trie", - "wat", -] - -[[package]] -name = "node-inspect" -version = "0.9.0-dev" -dependencies = [ - "clap 4.4.6", - "parity-scale-codec", - "sc-cli", - "sc-client-api", - "sc-service", - "sp-blockchain", - "sp-core", - "sp-runtime", - "thiserror", -] - [[package]] name = "node-primitives" version = "2.0.0" @@ -8766,7 +8607,6 @@ dependencies = [ "futures", "kitchensink-runtime", "log", - "node-executor", "node-primitives", "pallet-asset-conversion", "pallet-asset-conversion-tx-payment", @@ -8789,6 +8629,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", + "staging-node-executor", "substrate-test-client", "tempfile", ] @@ -11135,19 +10976,6 @@ dependencies = [ "staging-xcm-builder", ] -[[package]] -name = "parachain-info" -version = "0.1.0" -dependencies = [ - "cumulus-primitives-core", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-runtime", - "sp-std", -] - [[package]] name = "parachain-template-node" version = "0.1.0" @@ -11237,7 +11065,6 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-xcm", - "parachain-info", "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-runtime-common", @@ -11255,6 +11082,7 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -11276,7 +11104,6 @@ dependencies = [ "pallet-authorship", "pallet-balances", "pallet-collator-selection", - "parachain-info", "parity-scale-codec", "polkadot-core-primitives", "polkadot-primitives", @@ -11288,6 +11115,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "substrate-wasm-builder", @@ -11313,7 +11141,6 @@ dependencies = [ "pallet-collator-selection", "pallet-session", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11323,6 +11150,7 @@ dependencies = [ "sp-runtime", "sp-std", "sp-tracing", + "staging-parachain-info", "staging-xcm", "staging-xcm-executor", "substrate-wasm-builder", @@ -11557,7 +11385,6 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11578,6 +11405,7 @@ dependencies = [ "sp-storage", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -14283,7 +14111,6 @@ dependencies = [ 
"pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-xcm", - "parachain-info", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", @@ -14301,6 +14128,7 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -16366,7 +16194,6 @@ dependencies = [ "pallet-balances", "pallet-sudo", "pallet-timestamp", - "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", @@ -16382,6 +16209,7 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "substrate-wasm-builder", ] @@ -16614,7 +16442,6 @@ dependencies = [ "frame-try-runtime", "pallet-aura", "pallet-timestamp", - "parachain-info", "parachains-common", "parity-scale-codec", "scale-info", @@ -16630,6 +16457,7 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -17865,6 +17693,178 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "staging-chain-spec-builder" +version = "2.0.0" +dependencies = [ + "ansi_term", + "clap 4.4.6", + "rand 0.8.5", + "sc-chain-spec", + "sc-keystore", + "sp-core", + "sp-keystore", + "staging-node-cli", +] + +[[package]] +name = "staging-node-cli" +version = "3.0.0-dev" +dependencies = [ + "array-bytes 6.1.0", + "assert_cmd", + "clap 4.4.6", + "clap_complete", + "criterion 0.4.0", + "frame-benchmarking-cli", + "frame-system", + "frame-system-rpc-runtime-api", + "futures", + "jsonrpsee", + "kitchensink-runtime", + "log", + "nix 0.26.2", + "node-primitives", + "node-rpc", + "pallet-asset-conversion-tx-payment", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-balances", + "pallet-im-online", + "pallet-timestamp", + "parity-scale-codec", + "platforms", + "rand 0.8.5", + "regex", + "sc-authority-discovery", + "sc-basic-authorship", + "sc-block-builder", + "sc-chain-spec", + "sc-cli", + "sc-client-api", + "sc-client-db", + "sc-consensus", + "sc-consensus-babe", + "sc-consensus-epochs", + "sc-consensus-grandpa", + "sc-consensus-slots", + "sc-executor", + "sc-keystore", + "sc-mixnet", + "sc-network", + "sc-network-common", + "sc-network-statement", + "sc-network-sync", + "sc-offchain", + "sc-rpc", + "sc-service", + "sc-service-test", + "sc-statement-store", + "sc-storage-monitor", + "sc-sync-state-rpc", + "sc-sysinfo", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde", + "serde_json", + "soketto", + "sp-api", + "sp-authority-discovery", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-keystore", + "sp-mixnet", + "sp-runtime", + "sp-statement-store", + "sp-timestamp", + "sp-tracing", + "sp-transaction-storage-proof", + "staging-node-executor", + "staging-node-inspect", + "substrate-build-script-utils", + "substrate-cli-test-utils", + "substrate-frame-cli", + "substrate-rpc-client", + "tempfile", + "tokio", + "tokio-util", + "try-runtime-cli", + "wait-timeout", +] + +[[package]] +name = "staging-node-executor" +version = "3.0.0-dev" +dependencies = [ + "criterion 0.4.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "futures", + "kitchensink-runtime", + "node-primitives", + "node-testing", + "pallet-balances", + 
"pallet-contracts", + "pallet-glutton", + "pallet-im-online", + "pallet-root-testing", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-treasury", + "parity-scale-codec", + "sc-executor", + "scale-info", + "sp-application-crypto", + "sp-consensus-babe", + "sp-core", + "sp-externalities", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-state-machine", + "sp-statement-store", + "sp-tracing", + "sp-trie", + "wat", +] + +[[package]] +name = "staging-node-inspect" +version = "0.9.0-dev" +dependencies = [ + "clap 4.4.6", + "parity-scale-codec", + "sc-cli", + "sc-client-api", + "sc-service", + "sp-blockchain", + "sp-core", + "sp-runtime", + "thiserror", +] + +[[package]] +name = "staging-parachain-info" +version = "0.1.0" +dependencies = [ + "cumulus-primitives-core", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std", +] + [[package]] name = "staging-xcm" version = "1.0.0" @@ -18051,7 +18051,6 @@ dependencies = [ name = "substrate" version = "1.0.0" dependencies = [ - "chain-spec-builder", "frame-support", "sc-cli", "sc-consensus-aura", @@ -18063,6 +18062,7 @@ dependencies = [ "sc-service", "simple-mermaid", "sp-runtime", + "staging-chain-spec-builder", "subkey", ] @@ -18090,12 +18090,12 @@ dependencies = [ "assert_cmd", "futures", "nix 0.26.2", - "node-cli", "node-primitives", "regex", "sc-cli", "sc-service", "sp-rpc", + "staging-node-cli", "substrate-rpc-client", "tokio", ] diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 5dd18f0c156d..5d9752dbb206 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-client-cli" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] clap = { version = "4.4.6", features = ["derive"] } diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 1d87efa443ce..30798f848000 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-client-collator" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] parking_lot = "0.12.1" diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 8239a498746e..f440270c9822 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -4,6 +4,7 @@ description = "AURA consensus algorithm for parachains" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 26d7ba1b142c..9dfd14b1cf5e 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -4,6 +4,7 @@ description = "Cumulus specific common consensus implementations" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index 29720a8f4791..4cfba66cec37 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -4,6 +4,7 @@ 
description = "A Substrate `Proposer` for building parachain blocks" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] anyhow = "1.0" diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index ba077f624030..de280e6e9a89 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -4,6 +4,7 @@ description = "The relay-chain provided consensus algorithm" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index eaaf497ac3ed..08956f9f6c64 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 2ce903fd3524..e407b33e0e2e 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index bc8d0d430c77..19c99c5cb722 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-inprocess-interface" version = "0.1.0" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 3da7ab0b0e82..803df7d302bf 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-interface" version = "0.1.0" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] polkadot-overseer = { path = "../../../polkadot/node/overseer" } diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index f132b1a76535..6518e09cbb53 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-minimal-node" version = "0.1.0" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] # polkadot deps diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 0f09377e106c..102ce75508d2 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = 
"cumulus-relay-chain-rpc-interface" version = "0.1.0" edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index b7c274ceecdc..314aebdcb9c1 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-client-service" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] futures = "0.3.28" diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index 78d25f2285e5..c9d82ead1ebc 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "AURA consensus extension pallet for parachains" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 2f3f660ea151..3d48074d5c53 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-pallet-dmp-queue" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ], default-features = false } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index d48604d50258..76a77651cac5 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Base pallet for cumulus-based parachains" +license = "Apache-2.0" [dependencies] bytes = { version = "1.4.0", default-features = false } diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index cb5d9904c7cf..6accfa92c572 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Proc macros provided by the parachain-system pallet" +license = "Apache-2.0" [lib] proc-macro = true diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index af419cc37db0..e4ef72965c73 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 853dd86bb4c8..d79e57bceacf 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true edition.workspace = true name = "cumulus-pallet-xcm" version = "0.1.0" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git 
a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml
index b923c16cb1bf..fdb5654d59f7 100644
--- a/cumulus/pallets/xcmp-queue/Cargo.toml
+++ b/cumulus/pallets/xcmp-queue/Cargo.toml
@@ -3,6 +3,7 @@ name = "cumulus-pallet-xcmp-queue"
 version = "0.1.0"
 authors.workspace = true
 edition.workspace = true
+license = "Apache-2.0"
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ], default-features = false }
diff --git a/cumulus/pallets/xcmp-queue/src/bridging.rs b/cumulus/pallets/xcmp-queue/src/bridging.rs
index 78ccd4b62699..9db4b6e74c39 100644
--- a/cumulus/pallets/xcmp-queue/src/bridging.rs
+++ b/cumulus/pallets/xcmp-queue/src/bridging.rs
@@ -55,7 +55,9 @@ impl<SiblingBridgeHubParaId: Get<ParaId>, Runtime: crate::Config>
 		let sibling_bridge_hub_id: ParaId = SiblingBridgeHubParaId::get();
 
 		// let's find the channel's state with the sibling parachain,
-		let Some((outbound_state, queued_pages)) = pallet::Pallet::<Runtime>::outbound_channel_state(sibling_bridge_hub_id) else {
+		let Some((outbound_state, queued_pages)) =
+			pallet::Pallet::<Runtime>::outbound_channel_state(sibling_bridge_hub_id)
+		else {
 			return false
 		};
 		// suspended channel => it is congested
diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs
index cf6d947609d2..bab7e92ca2de 100644
--- a/cumulus/pallets/xcmp-queue/src/tests.rs
+++ b/cumulus/pallets/xcmp-queue/src/tests.rs
@@ -410,9 +410,11 @@ fn verify_fee_factor_increase_and_decrease() {
 		assert_eq!(DeliveryFeeFactor::<Test>::get(sibling_para_id), initial);
 
 		// Sending the message right now is cheap
-		let (_, delivery_fees) = validate_send::<XcmpQueue>(destination, xcm.clone())
-			.expect("message can be sent; qed");
-		let Fungible(delivery_fee_amount) = delivery_fees.inner()[0].fun else { unreachable!("asset is fungible; qed"); };
+		let (_, delivery_fees) =
+			validate_send::<XcmpQueue>(destination, xcm.clone()).expect("message can be sent; qed");
+		let Fungible(delivery_fee_amount) = delivery_fees.inner()[0].fun else {
+			unreachable!("asset is fungible; qed");
+		};
 		assert_eq!(delivery_fee_amount, 402_000_000);
 
 		let smaller_xcm = Xcm(vec![ClearOrigin; 30]);
@@ -422,19 +424,23 @@ fn verify_fee_factor_increase_and_decrease() {
 		assert_ok!(send_xcm::<XcmpQueue>(destination, xcm.clone())); // Size 520
 		assert_eq!(DeliveryFeeFactor::<Test>::get(sibling_para_id), FixedU128::from_float(1.05));
-		for _ in 0..12 { // We finish at size 929
+		for _ in 0..12 {
+			// We finish at size 929
 			assert_ok!(send_xcm::<XcmpQueue>(destination, smaller_xcm.clone()));
 		}
 		assert!(DeliveryFeeFactor::<Test>::get(sibling_para_id) > FixedU128::from_float(1.88));
 
 		// Sending the message right now is expensive
-		let (_, delivery_fees) = validate_send::<XcmpQueue>(destination, xcm.clone())
-			.expect("message can be sent; qed");
-		let Fungible(delivery_fee_amount) = delivery_fees.inner()[0].fun else { unreachable!("asset is fungible; qed"); };
+		let (_, delivery_fees) =
+			validate_send::<XcmpQueue>(destination, xcm.clone()).expect("message can be sent; qed");
+		let Fungible(delivery_fee_amount) = delivery_fees.inner()[0].fun else {
+			unreachable!("asset is fungible; qed");
+		};
 		assert_eq!(delivery_fee_amount, 758_030_955);
 
 		// Fee factor only decreases in `take_outbound_messages`
-		for _ in 0..5 { // We take 5 100 byte pages
+		for _ in 0..5 {
+			// We take 5 100 byte pages
 			XcmpQueue::take_outbound_messages(1);
 		}
 		assert!(DeliveryFeeFactor::<Test>::get(sibling_para_id) < FixedU128::from_float(1.72));
diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml
index 2e5bdab1e212..06e818dcdbc5 100644
---
a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -71,7 +71,7 @@ cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-feature cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../parachains/pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } [features] default = [ "std" ] diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 92c7cb6ef121..ebb3cdeaa5d3 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" +license = "Apache-2.0" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -40,7 +41,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -parachain-info = { path = "../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../pallets/parachain-info", default-features = false } [dev-dependencies] pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index e85112ed8ea2..e3f8023f4199 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] edition = "2021" description = "Managed content" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index c63101bab917..40f1a07c2dd6 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -1,8 +1,9 @@ [package] authors.workspace = true edition.workspace = true -name = "parachain-info" +name = "staging-parachain-info" version = "0.1.0" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 3acad9f371d3..0ea424e1a2dc 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true edition.workspace = true name = "cumulus-ping" version = "0.1.0" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff 
--git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index ede9c6af35a0..bde9cce75311 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -4,6 +4,7 @@ version = "0.9.420" authors.workspace = true edition.workspace = true description = "Kusama variant of Asset Hub parachain runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -75,7 +76,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml index db400f2977f4..4c3651eb8cf1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml @@ -4,6 +4,7 @@ version = "0.9.420" authors.workspace = true edition.workspace = true description = "Asset Hub Polkadot parachain runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -70,7 +71,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index ebf811e54635..d8a8f224aae4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -4,6 +4,7 @@ version = "0.9.420" authors.workspace = true edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -75,7 +76,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = 
"../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 7c7a07314596..95e46f31243b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -4,6 +4,7 @@ version = "0.9.420" authors.workspace = true edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -74,7 +75,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } assets-common = { path = "../common", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 770acc93c71d..a7f2b86a6c82 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Assets common utilities" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 86cc72e2dd3c..0bcbe79691cf 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Test utils for Asset Hub runtimes." 
+license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -30,7 +31,7 @@ assets-common = { path = "../common", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../../../primitives/parachain-inherent", default-features = false } cumulus-test-relay-sproof-builder = { path = "../../../../test/relay-sproof-builder", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } # Polkadot diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml index 603e74850cc7..8572c9ba3d0e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Kusama's BridgeHub parachain runtime" +license = "Apache-2.0" [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -68,7 +69,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [dev-dependencies] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml index 535a05169977..b9b0f50006e3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Polkadot's BridgeHub parachain runtime" +license = "Apache-2.0" [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -68,7 +69,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } 
[dev-dependencies] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 8c4e1612780f..c7e31c80ce44 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" +license = "Apache-2.0" [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } @@ -69,7 +70,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } # Bridges diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 2ad79bb24886..40d7741fb381 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -29,7 +30,7 @@ cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-fea cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml index 73d787caf864..b5f22c828e7e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Polkadot Collectives Parachain Runtime" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -73,7 +74,7 @@ cumulus-primitives-core = { path = "../../../../primitives/core", default-featur 
cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index 0eb2428f358a..1ee069d5cc3c 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -4,6 +4,7 @@ version = "0.2.0" description = "Parachain testnet runtime for FRAME Contracts pallet." authors.workspace = true edition.workspace = true +license = "Apache-2.0" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -73,7 +74,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [features] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index f5362e4d6b24..6051d029a1de 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" description = "Glutton parachain runtime." authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } @@ -47,7 +48,7 @@ cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = fals cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 18bee9982d0f..65ca58ac8b39 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" description = "Seedling parachain runtime. 
A starter runtime for solochain to parachain migration." authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } @@ -36,7 +37,7 @@ cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index ef4b62f985db..b9f63133ccec 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" description = "A minimal runtime to test Relay Chain consensus." authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } @@ -39,7 +40,7 @@ cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-featu cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [build-dependencies] diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index c455807fd8f8..9cc4b604f33c 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true description = "Utils for Runtimes testing" +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } @@ -27,7 +28,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-feat cumulus-pallet-dmp-queue = { path = "../../../pallets/dmp-queue", default-features = false } pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../common", default-features = false } -parachain-info = { path = "../../pallets/parachain-info", default-features = false } +parachain-info = {package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } assets-common = { path = "../assets/common", default-features = false } cumulus-primitives-core = { path = 
"../../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent", default-features = false } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 13e52f8a3ba6..d5db4a020344 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -72,7 +72,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } [features] diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 616d92b6940a..4f45b3ea89b5 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" -publish = false +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } @@ -54,7 +54,7 @@ cumulus-ping = { path = "../../../pallets/ping", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } parachains-common = { path = "../../../common", default-features = false } -parachain-info = { path = "../../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 3c2069c81ef4..6cacb5d764ae 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -5,6 +5,7 @@ authors.workspace = true build = "build.rs" edition.workspace = true description = "Runs a polkadot parachain node which could be a collator." 
+license = "Apache-2.0" [[bin]] name = "polkadot-parachain" diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 791ec17378a8..168c85b2efb5 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-aura" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 3ce7b1da4a6c..6c923a700ece 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-core" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 026d5a61bc8b..5a448f65ada3 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-parachain-inherent" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] async-trait = { version = "0.1.73", optional = true } diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index aed51a449127..a0fea51f8db1 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Provides timestamp related functionality for parachains." +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index d50f93d89b76..c159bca5d2a5 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-utility" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index e044b92f7c4a..8807e4e28589 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-test-relay-sproof-builder" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index c77d350bdfef..5d43b48ea32f 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -4,6 +4,7 @@ description = "Test kit to emulate XCM program execution." 
version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml index 9e2e0b1a6eec..47cde0ca70c5 100644 --- a/substrate/Cargo.toml +++ b/substrate/Cargo.toml @@ -7,13 +7,14 @@ repository.workspace = true authors.workspace = true edition.workspace = true version = "1.0.0" +publish = false # The dependencies are only needed for docs. [dependencies] simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } subkey = { path = "bin/utils/subkey" } -chain-spec-builder = { path = "bin/utils/chain-spec-builder" } +chain-spec-builder = { package = "staging-chain-spec-builder", path = "bin/utils/chain-spec-builder" } sc-service = { path = "client/service" } sc-cli = { path = "client/cli" } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 49dc39099be0..34cca4495da4 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-cli" +name = "staging-node-cli" version = "3.0.0-dev" authors.workspace = true description = "Generic Substrate node implementation in Rust." @@ -101,12 +101,12 @@ pallet-im-online = { path = "../../../frame/im-online", default-features = false kitchensink-runtime = { path = "../runtime" } node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } -node-executor = { path = "../executor" } +node-executor = { package = "staging-node-executor", path = "../executor" } # CLI-specific dependencies sc-cli = { path = "../../../client/cli", optional = true} frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} -node-inspect = { path = "../inspect", optional = true} +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} serde_json = "1.0.107" @@ -139,7 +139,7 @@ substrate-cli-test-utils = { path = "../../../test-utils/cli" } [build-dependencies] clap = { version = "4.4.6", optional = true } clap_complete = { version = "4.0.2", optional = true } -node-inspect = { path = "../inspect", optional = true} +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true} substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true} diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 246de8f3e925..a22aa365e042 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -39,6 +39,7 @@ use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidityError}, AccountId32, MultiAddress, OpaqueExtrinsic, }; +use staging_node_cli as node_cli; use tokio::runtime::Handle; fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index 47f890574151..dd6c237d4dd6 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -35,6 +35,7 
@@ use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, Transacti use sp_core::{crypto::Pair, sr25519}; use sp_keyring::Sr25519Keyring; use sp_runtime::OpaqueExtrinsic; +use staging_node_cli as node_cli; use tokio::runtime::Handle; fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { diff --git a/substrate/bin/node/cli/bin/main.rs b/substrate/bin/node/cli/bin/main.rs index 4b434a3e6dad..ccc7d7b6b112 100644 --- a/substrate/bin/node/cli/bin/main.rs +++ b/substrate/bin/node/cli/bin/main.rs @@ -20,6 +20,8 @@ #![warn(missing_docs)] +use staging_node_cli as node_cli; + fn main() -> sc_cli::Result<()> { node_cli::run() } diff --git a/substrate/bin/node/executor/Cargo.toml b/substrate/bin/node/executor/Cargo.toml index f73d97eb8cf1..5f43b5839e61 100644 --- a/substrate/bin/node/executor/Cargo.toml +++ b/substrate/bin/node/executor/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-executor" +name = "staging-node-executor" version = "3.0.0-dev" authors.workspace = true description = "Substrate node implementation in Rust." diff --git a/substrate/bin/node/executor/benches/bench.rs b/substrate/bin/node/executor/benches/bench.rs index 1c9c002492cf..95c8afd55066 100644 --- a/substrate/bin/node/executor/benches/bench.rs +++ b/substrate/bin/node/executor/benches/bench.rs @@ -35,6 +35,7 @@ use sp_core::{ }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; +use staging_node_executor as node_executor; criterion_group!(benches, bench_execute_block); criterion_main!(benches); diff --git a/substrate/bin/node/executor/tests/common.rs b/substrate/bin/node/executor/tests/common.rs index 6ce9ea3a0109..5f88ba85adf9 100644 --- a/substrate/bin/node/executor/tests/common.rs +++ b/substrate/bin/node/executor/tests/common.rs @@ -42,6 +42,7 @@ use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; use sp_externalities::Externalities; +use staging_node_executor as node_executor; pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 4a92db291858..30cc22b0e8c6 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-inspect" +name = "staging-node-inspect" version = "0.9.0-dev" authors.workspace = true description = "Substrate node block inspection tool." 
@@ -7,7 +7,6 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index f5a39693301c..68f80ab6e83f 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" log = "0.4.17" tempfile = "3.1.0" frame-system = { path = "../../../frame/system" } -node-executor = { path = "../executor" } +node-executor = { package = "staging-node-executor", path = "../executor" } node-primitives = { path = "../primitives" } kitchensink-runtime = { path = "../runtime" } pallet-asset-conversion = { path = "../../../frame/asset-conversion" } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index c7690faf7d06..f25358e52c2c 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "chain-spec-builder" +name = "staging-chain-spec-builder" version = "2.0.0" authors.workspace = true edition.workspace = true @@ -7,7 +7,6 @@ build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -readme = "README.md" publish = false [package.metadata.docs.rs] @@ -24,7 +23,7 @@ crate-type = ["rlib"] ansi_term = "0.12.1" clap = { version = "4.4.6", features = ["derive"] } rand = "0.8" -node-cli = { path = "../../node/cli" } +node-cli = { package = "staging-node-cli", path = "../../node/cli" } sc-chain-spec = { path = "../../../client/chain-spec" } sc-keystore = { path = "../../../client/keystore" } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/bin/utils/chain-spec-builder/bin/main.rs b/substrate/bin/utils/chain-spec-builder/bin/main.rs index 53e11abbf628..a002f6dc0e7b 100644 --- a/substrate/bin/utils/chain-spec-builder/bin/main.rs +++ b/substrate/bin/utils/chain-spec-builder/bin/main.rs @@ -23,6 +23,7 @@ use clap::Parser; use node_cli::chain_spec; use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use sp_core::{crypto::Ss58Codec, sr25519}; +use staging_chain_spec_builder as chain_spec_builder; use std::fs; fn main() -> Result<(), String> { diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 8de62aca5ec4..734bc5ef43f5 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -7,7 +7,6 @@ homepage = "https://substrate.io" edition.workspace = true license = "Apache-2.0" repository.workspace = true -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index d525008e5255..854ee8b55c88 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -7,7 +7,6 @@ license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet with umbrella crate" -readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/support/procedural/src/pallet/expand/warnings.rs b/substrate/frame/support/procedural/src/pallet/expand/warnings.rs index 6ce2097c2684..030e3ddaf323 100644 
--- a/substrate/frame/support/procedural/src/pallet/expand/warnings.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/warnings.rs @@ -33,9 +33,7 @@ pub(crate) fn weight_witness_warning( if dev_mode { return } - let CallWeightDef::Immediate(w) = &method.weight else { - return - }; + let CallWeightDef::Immediate(w) = &method.weight else { return }; let partial_warning = Warning::new_deprecated("UncheckedWeightWitness") .old("not check weight witness data") @@ -66,9 +64,7 @@ pub(crate) fn weight_constant_warning( if dev_mode { return } - let syn::Expr::Lit(lit) = weight else { - return - }; + let syn::Expr::Lit(lit) = weight else { return }; let warning = Warning::new_deprecated("ConstantWeight") .index(warnings.len()) diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml index ca059e384a35..1afb4a4f693d 100644 --- a/substrate/scripts/ci/deny.toml +++ b/substrate/scripts/ci/deny.toml @@ -38,7 +38,7 @@ exceptions = [ { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 9c4167c9b6e1..022db32c34f1 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -20,7 +20,7 @@ nix = "0.26.2" regex = "1.7.3" tokio = { version = "1.22.0", features = ["full"] } node-primitives = { path = "../../bin/node/primitives" } -node-cli = { path = "../../bin/node/cli" } +node-cli = { package = "staging-node-cli", path = "../../bin/node/cli" } sc-cli = { path = "../../client/cli" } sc-service = { path = "../../client/service" } futures = "0.3.28" diff --git a/substrate/test-utils/cli/src/lib.rs b/substrate/test-utils/cli/src/lib.rs index 99119a44d2e9..d77a89b4dbf4 100644 --- a/substrate/test-utils/cli/src/lib.rs +++ b/substrate/test-utils/cli/src/lib.rs @@ -135,7 +135,7 @@ pub fn build_substrate(args: &[&str]) { // Get the root workspace directory from the CARGO_MANIFEST_DIR environment variable let mut cmd = Command::new("cargo"); - cmd.arg("build").arg("-p=node-cli"); + cmd.arg("build").arg("-p=staging-node-cli"); if is_release_build { cmd.arg("--release"); From b53a93a6762324b66a4e67add5fdfc65c9897f07 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 1 Nov 2023 09:15:19 +0100 Subject: [PATCH 49/69] Bump ec-utils version (#2104) --- Cargo.lock | 2 +- substrate/primitives/crypto/ec-utils/Cargo.toml | 2 +- substrate/primitives/crypto/ec-utils/src/lib.rs | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2b9b9ec45d6..1f22fccd2c1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17085,7 +17085,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" -version = "0.4.0" +version = "0.4.1" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 651fc96d7ac1..3c84c17a5c20 100644 --- 
a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-crypto-ec-utils" -version = "0.4.0" +version = "0.4.1" authors.workspace = true description = "Host functions for common Arkworks elliptic curve operations" edition.workspace = true diff --git a/substrate/primitives/crypto/ec-utils/src/lib.rs b/substrate/primitives/crypto/ec-utils/src/lib.rs index e3aea98faa1e..970ad71765a5 100644 --- a/substrate/primitives/crypto/ec-utils/src/lib.rs +++ b/substrate/primitives/crypto/ec-utils/src/lib.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Elliptic curves which are mostly compatible with *Arkworks* library -//! mostly useful in non-native contexts. +//! This crate offers elliptic curves types which are compatible with the +//! [Arkworks](https://github.com/arkworks-rs) library functionalities. //! -//! The definitions make use of host functions to offload the non-native -//! computational environment from the some of the most computationally -//! expensive operations by internally leveraging the +//! The implementation has been primarily designed to be used in slow hosted +//! targets (e.g. wasm32) and offloads the most computationally expensive +//! operations to the host by leveraging the //! [arkworks-extensions](https://github.com/paritytech/arkworks-extensions) -//! library. +//! library and Substrate's host functions. //! //! The exported types are organized and named in a way that mirrors the structure //! of the types in the original Arkworks library. This design choice aims to make From 37f3269c4513827cd1473bce49b0401cc285f5bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 09:50:39 +0100 Subject: [PATCH 50/69] Bump chevdor/srtool-actions from 0.8.0 to 0.9.0 (#2089) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [chevdor/srtool-actions](https://github.com/chevdor/srtool-actions) from 0.8.0 to 0.9.0.

Release notes

Sourced from chevdor/srtool-actions's releases.

v0.9.0

What's Changed

Full Changelog: https://github.com/chevdor/srtool-actions/compare/v0.8.0...v0.9.0

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=chevdor/srtool-actions&package-manager=github_actions&previous-version=0.8.0&new-version=0.9.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-and-attach-release-runtimes.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml index 297f7a1665b2..c7cd4b34384a 100644 --- a/.github/workflows/build-and-attach-release-runtimes.yml +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -33,7 +33,7 @@ jobs: - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }} id: srtool_build - uses: chevdor/srtool-actions@v0.8.0 + uses: chevdor/srtool-actions@v0.9.0 env: BUILD_OPTS: ${{ matrix.build_config.opts }} with: From 9ca267328e0cba753aa528615cc19d1f633ed764 Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Wed, 1 Nov 2023 12:28:16 +0100 Subject: [PATCH 51/69] upgraded review-bot to 2.2.0 (#2097) This version includes paritytech/review-bot#97 which can assign reviewers. It will be the final step required to replace PRCR. It also moves the secrets to the environment master. --- .github/workflows/review-bot.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index 178193da3389..5970989cde09 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -6,12 +6,10 @@ on: types: - completed -permissions: - contents: read - jobs: review-approvals: runs-on: ubuntu-latest + environment: master steps: - name: Extract content of artifact id: number @@ -19,15 +17,16 @@ jobs: with: artifact-name: pr_number - name: Generate token - id: team_token + id: app_token uses: tibdex/github-app-token@v1 with: app_id: ${{ secrets.REVIEW_APP_ID }} private_key: ${{ secrets.REVIEW_APP_KEY }} - name: "Evaluates PR reviews and assigns reviewers" - uses: paritytech/review-bot@v2.1.0 + uses: paritytech/review-bot@v2.2.0 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - team-token: ${{ steps.team_token.outputs.token }} - checks-token: ${{ steps.team_token.outputs.token }} + repo-token: ${{ steps.app_token.outputs.token }} + team-token: ${{ steps.app_token.outputs.token }} + checks-token: ${{ steps.app_token.outputs.token }} pr-number: ${{ steps.number.outputs.content }} + request-reviewers: true From b67cb0f8ab13b20aba82c19d4b08ae84f9f71efc Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 09:43:54 +0200 Subject: [PATCH 52/69] xcm-emulator: configure penpal for asset transfers and enhance existing tests --- .../src/tests/reserve_transfer.rs | 7 ++- .../asset-hub-rococo/src/tests/teleport.rs | 2 +- .../asset-hub-westend/src/tests/teleport.rs | 2 +- .../emulated/common/src/constants.rs | 15 +++-- .../runtimes/testing/penpal/src/xcm_config.rs | 56 ++++++------------- 5 files changed, 33 insertions(+), 49 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index a1b604fad02c..903c01ee921c 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -166,6 +166,7 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let mut test = SystemParaToParaTest::new(test_args); let 
sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; test.set_assertion::(system_para_to_para_assertions); // TODO: Add assertion for Penpal runtime. Right now message is failing with @@ -174,6 +175,7 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< @@ -181,9 +183,10 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } /// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs index 0d2ca6852470..297ee83d7535 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs @@ -51,7 +51,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Rococo, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs index 4fe0062dafcd..aca41322c3cf 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs @@ -53,7 +53,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Westend, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs index 93abae753b94..6b2824bd09f2 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs @@ -386,6 +386,7 @@ pub mod asset_hub_westend { use super::*; pub const PARA_ID: u32 = 1000; pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + const ENDOWMENT: u128 = ED * 4096; pub fn genesis() -> Storage { let genesis_config = asset_hub_westend_runtime::RuntimeGenesisConfig { @@ -399,7 +400,7 @@ pub mod asset_hub_westend { balances: accounts::init_balances() .iter() .cloned() - 
.map(|k| (k, ED * 4096)) + .map(|k| (k, ENDOWMENT)) .collect(), }, parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { @@ -442,6 +443,7 @@ pub mod asset_hub_rococo { use super::*; pub const PARA_ID: u32 = 1000; pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; + const ENDOWMENT: u128 = ED * 4096 * 4096; pub fn genesis() -> Storage { let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig { @@ -455,7 +457,7 @@ pub mod asset_hub_rococo { balances: accounts::init_balances() .iter() .cloned() - .map(|k| (k, ED * 4096 * 4096)) + .map(|k| (k, ENDOWMENT)) .collect(), }, parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig { @@ -498,6 +500,7 @@ pub mod asset_hub_wococo { use super::*; pub const PARA_ID: u32 = 1000; pub const ED: Balance = parachains_common::wococo::currency::EXISTENTIAL_DEPOSIT; + const ENDOWMENT: u128 = ED * 4096; pub fn genesis() -> Storage { let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig { @@ -511,7 +514,7 @@ pub mod asset_hub_wococo { balances: accounts::init_balances() .iter() .cloned() - .map(|k| (k, ED * 4096)) + .map(|k| (k, ENDOWMENT)) .collect(), }, parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig { @@ -556,6 +559,7 @@ pub mod penpal { pub const PARA_ID_A: u32 = 2000; pub const PARA_ID_B: u32 = 2001; pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; + const ENDOWMENT: u128 = ED * 4096; pub fn genesis(para_id: u32) -> Storage { let genesis_config = penpal_runtime::RuntimeGenesisConfig { @@ -569,7 +573,7 @@ pub mod penpal { balances: accounts::init_balances() .iter() .cloned() - .map(|k| (k, ED * 4096)) + .map(|k| (k, ENDOWMENT)) .collect(), }, parachain_info: penpal_runtime::ParachainInfoConfig { @@ -616,6 +620,7 @@ pub mod bridge_hub_rococo { use super::*; pub const PARA_ID: u32 = 1013; pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; + const ENDOWMENT: u128 = ED * 4096; pub fn genesis() -> Storage { let genesis_config = bridge_hub_rococo_runtime::RuntimeGenesisConfig { @@ -629,7 +634,7 @@ pub mod bridge_hub_rococo { balances: accounts::init_balances() .iter() .cloned() - .map(|k| (k, ED * 4096)) + .map(|k| (k, ENDOWMENT)) .collect(), }, parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 542d07fbed95..7dde53452d35 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -182,7 +182,7 @@ pub type Barrier = TrailingSetTopicAsId< /// Type alias to conveniently refer to `frame_system`'s `Config::AccountId`. pub type AccountIdOf = ::AccountId; -/// Asset filter that allows all assets from a certain location. +/// Asset filter that allows all assets from a certain location matching asset id. pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { @@ -193,6 +193,17 @@ impl> ContainsPair for AssetsFr } } +/// Asset filter that allows native/relay asset if coming from a certain location. 
+pub struct NativeAssetFrom(PhantomData); +impl> ContainsPair for NativeAssetFrom { + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + let loc = T::get(); + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + if *asset_loc == MultiLocation::from(Parent)) + } +} + /// Allow checking in assets that have issuance > 0. pub struct NonZeroIssuance(PhantomData<(AccountId, Assets)>); impl Contains<>::AssetId> @@ -221,43 +232,6 @@ where } } -pub trait Reserve { - /// Returns assets reserve location. - fn reserve(&self) -> Option; -} - -// Takes the chain part of a MultiAsset -impl Reserve for MultiAsset { - fn reserve(&self) -> Option { - if let AssetId::Concrete(location) = self.id { - let first_interior = location.first_interior(); - let parents = location.parent_count(); - match (parents, first_interior) { - (0, Some(Parachain(id))) => Some(MultiLocation::new(0, X1(Parachain(*id)))), - (1, Some(Parachain(id))) => Some(MultiLocation::new(1, X1(Parachain(*id)))), - (1, _) => Some(MultiLocation::parent()), - _ => None, - } - } else { - None - } - } -} - -/// A `FilterAssetLocation` implementation. Filters multi native assets whose -/// reserve is same with `origin`. -pub struct MultiNativeAsset; -impl ContainsPair for MultiNativeAsset { - fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { - if let Some(ref reserve) = asset.reserve() { - if reserve == origin { - return true - } - } - false - } -} - parameter_types! { /// The location that this chain recognizes as the Relay network's Asset Hub. pub SystemAssetHubLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1000))); @@ -268,7 +242,8 @@ parameter_types! { pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } -pub type Reserves = (NativeAsset, AssetsFrom); +pub type Reserves = + (NativeAsset, AssetsFrom, NativeAssetFrom); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -277,7 +252,8 @@ impl xcm_executor::Config for XcmConfig { // How to withdraw and deposit an asset. 
type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = MultiNativeAsset; // TODO: maybe needed to be replaced by Reserves + type IsReserve = Reserves; + // no teleport trust established with other chains type IsTeleporter = NativeAsset; type UniversalLocation = UniversalLocation; type Barrier = Barrier; From ec87ce6d97c85c5c50a9205bbc7d2a30689f1a07 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 11:00:16 +0200 Subject: [PATCH 53/69] xcm-emulator: add relay to penpal native transfer test --- Cargo.lock | 3 + .../assets/asset-hub-rococo/Cargo.toml | 2 + .../assets/asset-hub-rococo/src/lib.rs | 20 ++-- .../src/tests/reserve_transfer.rs | 109 +++++++++++++++++- .../asset-hub-rococo/src/tests/teleport.rs | 8 +- .../assets/asset-hub-westend/Cargo.toml | 1 + .../src/tests/reserve_transfer.rs | 29 +++-- 7 files changed, 147 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6af9c3efdf56..0c35615ddaa4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -879,6 +879,8 @@ dependencies = [ "assert_matches", "asset-hub-rococo-runtime", "asset-test-utils", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-xcmp-queue", "frame-support", "frame-system", "integration-tests-common", @@ -985,6 +987,7 @@ dependencies = [ "asset-test-utils", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", "frame-support", "frame-system", "integration-tests-common", diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml index db58d8d33039..5b8ad06d63bd 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml @@ -32,6 +32,8 @@ rococo-runtime = { path = "../../../../../../polkadot/runtime/rococo", default-f asset-test-utils = { path = "../../../../runtimes/assets/test-utils", default-features = false } parachains-common = { path = "../../../../common" } asset-hub-rococo-runtime = { path = "../../../../runtimes/assets/asset-hub-rococo" } +cumulus-pallet-dmp-queue = { path = "../../../../../pallets/dmp-queue", default-features = false} +cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} # Local xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs index 42f54bdf49df..686d9e3f0d36 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs @@ -49,18 +49,20 @@ pub const ASSET_MIN_BALANCE: u128 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests -pub fn relay_test_args(amount: Balance) -> TestArgs { +/// Returns a `TestArgs` instance to de used for the Relay Chain across integration tests +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { TestArgs { - 
dest: Rococo::child_location_of(AssetHubRococo::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubRococoReceiver::get().into(), - } - .into(), + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), amount, assets: (Here, amount).into(), asset_id: None, @@ -69,7 +71,7 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests +/// Returns a `TestArgs` instance to de used for the System Parachain across integration tests pub fn system_para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 903c01ee921c..08873a9ce548 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -15,8 +15,45 @@ use crate::*; use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn relay_to_para_sender_assertions(t: RelayToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + Rococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == Rococo::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn relay_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalRococoA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::DmpQueue(cumulus_pallet_dmp_queue::Event::ExecutedDownward { + outcome: Outcome::Complete(_), + .. + }) => {}, + ] + ); +} + +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -27,7 +64,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -41,6 +78,17 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } +fn system_para_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalRococoA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Success { .. 
}) => {}, + ] + ); +} + fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -52,7 +100,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -67,6 +115,17 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } +fn relay_to_para_limited_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, @@ -148,6 +207,45 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { }); } +/// Limited Reserve Transfers of native asset from Relay to Parachain should work +#[test] +fn limited_reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalRococoA::para_id()); + let beneficiary_id = PenpalRococoAReceiver::get(); + let amount_to_send: Balance = ROCOCO_ED * 1000; + + let test_args = TestContext { + sender: RococoSender::get(), + receiver: PenpalRococoAReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), + }; + + let mut test = RelayToParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(relay_to_para_receiver_assertions); + test.set_dispatchable::(relay_to_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = Rococo::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + /// Limited Reserve Transfers of native asset from System Parachain to Parachain should work #[test] fn limited_reserve_transfer_native_asset_from_system_para_to_para() { @@ -168,9 +266,8 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let sender_balance_before = test.sender.balance; let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(system_para_to_para_receiver_assertions); test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs index 297ee83d7535..09f1e6c0a590 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs @@ -157,10 +157,12 @@ fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { fn limited_teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let test_args = TestContext { sender: RococoSender::get(), receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + args: relay_test_args(dest, beneficiary_id, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -278,10 +280,12 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { fn teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = ROCOCO_ED * 1000; + let dest = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let test_args = TestContext { sender: RococoSender::get(), receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + args: relay_test_args(dest, beneficiary_id, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml index 4b6b8874b6a4..87f3c1d4b513 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml @@ -39,6 +39,7 @@ asset-hub-westend-runtime = { path = "../../../../runtimes/assets/asset-hub-west asset-test-utils = { path = "../../../../runtimes/assets/test-utils", default-features = false } cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../pallets/dmp-queue" } cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../pallets/parachain-system" } +cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} # Local xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs index c7a25dde78d3..518e117c1074 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -16,7 +16,7 @@ use crate::*; use asset_hub_westend_runtime::xcm_config::XcmConfig; -fn 
system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -27,7 +27,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -41,6 +41,17 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } +fn para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalWestendA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Success { .. }) => {}, + ] + ); +} + fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -52,7 +63,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -165,14 +176,15 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( @@ -184,9 +196,10 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { ) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } /// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work From da3c12e89f5e21aaabe43b8be58c77e65d2a7b97 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 13:47:04 +0200 Subject: [PATCH 54/69] xcm-emulator: add ah to penpal native asset transfer test --- Cargo.lock | 1 + .../assets/asset-hub-rococo/Cargo.toml | 1 + .../assets/asset-hub-rococo/src/lib.rs | 6 +- .../src/tests/reserve_transfer.rs | 113 +++++++++++++++++- .../asset-hub-rococo/src/tests/teleport.rs | 8 +- .../assets/asset-hub-westend/src/lib.rs | 4 +- .../bridges/bridge-hub-rococo/src/lib.rs | 2 +- .../emulated/common/src/lib.rs | 2 + 8 files changed, 123 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0c35615ddaa4..066cc394bd82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -890,6 +890,7 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", + "penpal-runtime", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-runtime-parachains", diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml index 5b8ad06d63bd..820429deae45 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml @@ -32,6 +32,7 @@ rococo-runtime = { path = "../../../../../../polkadot/runtime/rococo", default-f asset-test-utils = { path = "../../../../runtimes/assets/test-utils", default-features = false } parachains-common = { path = "../../../../common" } asset-hub-rococo-runtime = { path = "../../../../runtimes/assets/asset-hub-rococo" } +penpal-runtime = { path = "../../../../runtimes/testing/penpal" } cumulus-pallet-dmp-queue = { path = "../../../../../pallets/dmp-queue", default-features = false} cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs index 686d9e3f0d36..6da7537e82f4 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs @@ -54,7 +54,7 @@ pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; pub type ParaToSystemParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain across integration tests +/// 
Returns a `TestArgs` instance to be used for the Relay Chain across integration tests pub fn relay_test_args( dest: MultiLocation, beneficiary_id: AccountId32, @@ -71,8 +71,8 @@ pub fn relay_test_args( } } -/// Returns a `TestArgs` instance to de used for the System Parachain across integration tests -pub fn system_para_test_args( +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, amount: Balance, diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 08873a9ce548..14b2aae7a8e0 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -15,6 +15,7 @@ use crate::*; use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use penpal_runtime::xcm_config::XcmConfig as PenpalRococoXcmConfig; use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; fn relay_to_para_sender_assertions(t: RelayToParaTest) { @@ -89,6 +90,51 @@ fn system_para_to_para_receiver_assertions(_: Test) { ); } +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + PenpalRococoA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( + 864_610_000, + 8_799, + ))); + + assert_expected_events!( + PenpalRococoA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()), + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Success { .. 
}) => {}, + ] + ); +} + fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -137,6 +183,17 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } +fn para_to_system_para_limited_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + /// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't /// work #[test] @@ -258,7 +315,7 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalRococoAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; let mut test = SystemParaToParaTest::new(test_args); @@ -286,6 +343,54 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { assert!(receiver_balance_after > receiver_balance_before); } +/// Limited Reserve Transfers of native asset from Parachain to System Parachain should work +#[test] +fn limited_reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalRococoA::sibling_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: PenpalRococoASender::get(), + receiver: AssetHubRococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + }; + + let mut test = ParaToSystemParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + // MultiLocation { parents: 1, interior: X1(Parachain(PenpalRococoA::para_id().into())) }; + let penpal_location_as_seen_by_ahr = + AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); + + // fund the Penpal's SA on AHR with the native tokens held in reserve + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = PenpalRococoA::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); +} + /// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work #[test] fn limited_reserve_transfer_asset_from_system_para_to_para() { @@ -307,13 +412,13 @@ fn 
limited_reserve_transfer_asset_from_system_para_to_para() { (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) .into(); - let system_para_test_args = TestContext { + let para_test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalRococoAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let mut system_para_test = SystemParaToParaTest::new(para_test_args); system_para_test.set_assertion::(system_para_to_para_assets_assertions); // TODO: Add assertions when Penpal is able to manage assets diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs index 09f1e6c0a590..217bac6865c8 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs @@ -206,7 +206,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -247,7 +247,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -329,7 +329,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -370,7 +370,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; let mut test = SystemParaToRelayTest::new(test_args); diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs index 2ade5f81d8a9..d3ad32f7c946 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs @@ -53,7 +53,7 @@ pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain across integration tests +/// Returns a `TestArgs` instance to be used for 
the Relay Chain across integration tests pub fn relay_test_args(amount: Balance) -> TestArgs { TestArgs { dest: Westend::child_location_of(AssetHubWestend::para_id()), @@ -70,7 +70,7 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain across integration tests +/// Returns a `TestArgs` instance to be used for the System Parachain across integration tests pub fn system_para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs index ecf773765b3e..ab7043b37f8f 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs @@ -46,7 +46,7 @@ pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain across integration tests +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests pub fn relay_test_args(amount: Balance) -> TestArgs { TestArgs { dest: Rococo::child_location_of(AssetHubRococo::para_id()), diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 8a8081c9fac3..1ec006f4510b 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -189,6 +189,7 @@ decl_test_parachains! { pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, } }, pub struct PenpalRococoB { @@ -326,6 +327,7 @@ impl_accounts_helpers_for_parachain!(BridgeHubRococo); impl_assert_events_helpers_for_parachain!(BridgeHubRococo); // PenpalRococo implementations +impl_accounts_helpers_for_parachain!(PenpalRococoA); impl_assert_events_helpers_for_parachain!(PenpalRococoA); impl_assert_events_helpers_for_parachain!(PenpalRococoB); From 1cd6acdff3618a547dc98d9fd0c7984b5db933ef Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 1 Nov 2023 15:10:33 +0200 Subject: [PATCH 55/69] Move syncing code from `sc-network-common` to `sc-network-sync` (#1912) This PR moves syncing-related code from `sc-network-common` to `sc-network-sync`. Unfortunately, some parts are tightly integrated with networking, so they were left in `sc-network-common` for now: 1. `SyncMode` in `common/src/sync.rs` (used in `NetworkConfiguration`). 2. `BlockAnnouncesHandshake`, `BlockRequest`, `BlockResponse`, etc. in `common/src/sync/message.rs` (used in `src/protocol.rs` and `src/protocol/message.rs`). More substantial refactoring is needed to decouple syncing and networking completely, including getting rid of the hardcoded sync protocol. ## Release notes Move syncing-related code from `sc-network-common` to `sc-network-sync`. Delete `ChainSync` trait as it's never used (the only implementation is accessed directly from `SyncingEngine` and exposes a lot of public methods that are not part of the trait). Some new trait(s) for syncing will likely be introduced as part of Sync 2.0 refactoring to represent syncing strategies. 
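For downstream crates the visible effect is an import-path change: `SyncStatusProvider`, `SyncEventStream`, `SyncState`, `SyncStatus` and the warp-sync types are now re-exported from `sc-network-sync` instead of `sc-network-common::sync`. A minimal migration sketch, using only the re-exports visible in the diffs below; the `report_sync` helper and its `log` calls are illustrative, not part of either crate:

// Before: use sc_network_common::sync::SyncStatusProvider;
// After:
use sc_network_sync::SyncStatusProvider;
use sp_runtime::traits::Block as BlockT;

/// Illustrative helper: ask any status provider whether the node is still
/// major-syncing.
async fn report_sync<B: BlockT, P: SyncStatusProvider<B>>(provider: &P) {
	// `status()` returns `Err(())` when the syncing engine is unavailable.
	if let Ok(status) = provider.status().await {
		log::info!("major syncing: {}", status.state.is_major_syncing());
	}
}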
--- Cargo.lock | 5 + substrate/client/consensus/grandpa/Cargo.toml | 1 + .../grandpa/src/communication/mod.rs | 2 +- .../grandpa/src/communication/tests.rs | 6 +- .../consensus/grandpa/src/warp_proof.rs | 2 +- substrate/client/informant/Cargo.toml | 1 + substrate/client/informant/src/display.rs | 2 +- substrate/client/informant/src/lib.rs | 2 +- substrate/client/network-gossip/Cargo.toml | 1 + substrate/client/network-gossip/src/bridge.rs | 5 +- substrate/client/network-gossip/src/lib.rs | 2 +- substrate/client/network/common/src/sync.rs | 342 -- .../client/network/common/src/sync/warp.rs | 101 - substrate/client/network/src/config.rs | 14 +- substrate/client/network/src/lib.rs | 9 +- substrate/client/network/statement/Cargo.toml | 1 + substrate/client/network/statement/src/lib.rs | 6 +- .../network/sync/src/block_request_handler.rs | 4 +- .../client/network/sync/src/chain_sync.rs | 2435 ++++++++++++ .../network/sync/src/chain_sync/test.rs | 1085 ++++++ substrate/client/network/sync/src/engine.rs | 49 +- .../client/network/sync/src/extra_requests.rs | 8 +- substrate/client/network/sync/src/lib.rs | 3460 +---------------- substrate/client/network/sync/src/mock.rs | 61 +- .../network/sync/src/pending_responses.rs | 7 +- .../src/request_metrics.rs} | 0 .../network/sync/src/service/chain_sync.rs | 5 +- substrate/client/network/sync/src/state.rs | 6 +- substrate/client/network/sync/src/types.rs | 206 + substrate/client/network/sync/src/warp.rs | 95 +- .../network/sync/src/warp_request_handler.rs | 2 +- substrate/client/network/test/src/lib.rs | 9 +- .../client/network/transactions/Cargo.toml | 1 + .../client/network/transactions/src/lib.rs | 7 +- substrate/client/service/src/metrics.rs | 2 +- 35 files changed, 3903 insertions(+), 4041 deletions(-) delete mode 100644 substrate/client/network/common/src/sync/warp.rs create mode 100644 substrate/client/network/sync/src/chain_sync.rs create mode 100644 substrate/client/network/sync/src/chain_sync/test.rs rename substrate/client/network/{common/src/sync/metrics.rs => sync/src/request_metrics.rs} (100%) create mode 100644 substrate/client/network/sync/src/types.rs diff --git a/Cargo.lock b/Cargo.lock index 1f22fccd2c1a..0f386c52b384 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14995,6 +14995,7 @@ dependencies = [ "sc-network", "sc-network-common", "sc-network-gossip", + "sc-network-sync", "sc-network-test", "sc-telemetry", "sc-transaction-pool-api", @@ -15211,6 +15212,7 @@ dependencies = [ "sc-client-api", "sc-network", "sc-network-common", + "sc-network-sync", "sp-blockchain", "sp-runtime", ] @@ -15363,6 +15365,7 @@ dependencies = [ "quickcheck", "sc-network", "sc-network-common", + "sc-network-sync", "schnellru", "sp-runtime", "substrate-prometheus-endpoint", @@ -15403,6 +15406,7 @@ dependencies = [ "parity-scale-codec", "sc-network", "sc-network-common", + "sc-network-sync", "sp-consensus", "sp-statement-store", "substrate-prometheus-endpoint", @@ -15489,6 +15493,7 @@ dependencies = [ "parity-scale-codec", "sc-network", "sc-network-common", + "sc-network-sync", "sc-utils", "sp-consensus", "sp-runtime", diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 472bdd1c5b82..921b9c539e37 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -37,6 +37,7 @@ sc-consensus = { path = "../common" } sc-network = { path = "../../network" } sc-network-gossip = { path = "../../network-gossip" } sc-network-common = { path = "../../network/common" 
} +sc-network-sync = { path = "../../network/sync" } sc-telemetry = { path = "../../telemetry" } sc-utils = { path = "../../utils" } sp-api = { path = "../../../primitives/api" } diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index c0749858568f..6d9e956b41be 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -59,7 +59,7 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; -use sc_network_common::sync::SyncEventStream; +use sc_network_sync::SyncEventStream; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index 10c4772fc76d..4a869d0f5152 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -33,11 +33,9 @@ use sc_network::{ NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender, PeerId, ReputationChange, }; -use sc_network_common::{ - role::ObservedRole, - sync::{SyncEvent as SyncStreamEvent, SyncEventStream}, -}; +use sc_network_common::role::ObservedRole; use sc_network_gossip::Validator; +use sc_network_sync::{SyncEvent as SyncStreamEvent, SyncEventStream}; use sc_network_test::{Block, Hash}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_consensus_grandpa::AuthorityList; diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index ea8114eafd78..dcd55dcdf3af 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -23,7 +23,7 @@ use crate::{ BlockNumberOps, GrandpaJustification, SharedAuthoritySet, }; use sc_client_api::Backend as ClientBackend; -use sc_network_common::sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; +use sc_network_sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index e077f4e11a59..47e65df3cc11 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -19,6 +19,7 @@ futures-timer = "3.0.1" log = "0.4.17" sc-client-api = { path = "../api" } sc-network-common = { path = "../network/common" } +sc-network-sync = { path = "../network/sync" } sc-network = { path = "../network" } sp-blockchain = { path = "../../primitives/blockchain" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/informant/src/display.rs b/substrate/client/informant/src/display.rs index 722cf56d778d..64ddb71d572e 100644 --- a/substrate/client/informant/src/display.rs +++ b/substrate/client/informant/src/display.rs @@ -21,7 +21,7 @@ use ansi_term::Colour; use log::info; use sc_client_api::ClientInfo; use sc_network::NetworkStatus; -use sc_network_common::sync::{ +use sc_network_sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, SyncState, SyncStatus, }; diff --git 
a/substrate/client/informant/src/lib.rs b/substrate/client/informant/src/lib.rs index 03f9075055e2..b072f8551f9f 100644 --- a/substrate/client/informant/src/lib.rs +++ b/substrate/client/informant/src/lib.rs @@ -24,7 +24,7 @@ use futures_timer::Delay; use log::{debug, info, trace}; use sc_client_api::{BlockchainEvents, UsageProvider}; use sc_network::NetworkStatusProvider; -use sc_network_common::sync::SyncStatusProvider; +use sc_network_sync::SyncStatusProvider; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; use std::{collections::VecDeque, fmt::Display, sync::Arc, time::Duration}; diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 73d2d3fa051e..95e26a232c1d 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -24,6 +24,7 @@ tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-network = { path = "../network" } sc-network-common = { path = "../network/common" } +sc-network-sync = { path = "../network/sync" } sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index 6a3790ee2b2b..8f7d490757b3 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -22,7 +22,7 @@ use crate::{ }; use sc_network::{event::Event, types::ProtocolName, ReputationChange}; -use sc_network_common::sync::SyncEvent; +use sc_network_sync::SyncEvent; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -338,7 +338,8 @@ mod tests { config::MultiaddrWithPeerId, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NotificationSenderError, NotificationSenderT as NotificationSender, }; - use sc_network_common::{role::ObservedRole, sync::SyncEventStream}; + use sc_network_common::role::ObservedRole; + use sc_network_sync::SyncEventStream; use sp_runtime::{ testing::H256, traits::{Block as BlockT, NumberFor}, diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs index 5b02be5c23f6..a77141ec6f63 100644 --- a/substrate/client/network-gossip/src/lib.rs +++ b/substrate/client/network-gossip/src/lib.rs @@ -71,7 +71,7 @@ use libp2p::{multiaddr, PeerId}; use sc_network::{ types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, }; -use sc_network_common::sync::SyncEventStream; +use sc_network_sync::SyncEventStream; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::iter; diff --git a/substrate/client/network/common/src/sync.rs b/substrate/client/network/common/src/sync.rs index 4ca21221f87f..a910740aef64 100644 --- a/substrate/client/network/common/src/sync.rs +++ b/substrate/client/network/common/src/sync.rs @@ -19,150 +19,6 @@ //! Abstract interfaces and data structures related to network sync. 
pub mod message; -pub mod metrics; -pub mod warp; - -use crate::{role::Roles, types::ReputationChange}; -use futures::Stream; - -use libp2p_identity::PeerId; - -use message::{BlockAnnounce, BlockRequest, BlockResponse}; -use sc_consensus::{import_queue::RuntimeOrigin, IncomingBlock}; -use sp_consensus::BlockOrigin; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - Justifications, -}; -use warp::WarpSyncProgress; - -use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; - -/// The sync status of a peer we are trying to sync with -#[derive(Debug)] -pub struct PeerInfo { - /// Their best block hash. - pub best_hash: Block::Hash, - /// Their best block number. - pub best_number: NumberFor, -} - -/// Info about a peer's known state (both full and light). -#[derive(Clone, Debug)] -pub struct ExtendedPeerInfo { - /// Roles - pub roles: Roles, - /// Peer best block hash - pub best_hash: B::Hash, - /// Peer best block number - pub best_number: NumberFor, -} - -/// Reported sync state. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum SyncState { - /// Initial sync is complete, keep-up sync is active. - Idle, - /// Actively catching up with the chain. - Downloading { target: BlockNumber }, - /// All blocks are downloaded and are being imported. - Importing { target: BlockNumber }, -} - -impl SyncState { - /// Are we actively catching up with the chain? - pub fn is_major_syncing(&self) -> bool { - !matches!(self, SyncState::Idle) - } -} - -/// Reported state download progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct StateDownloadProgress { - /// Estimated download percentage. - pub percentage: u32, - /// Total state size in bytes downloaded so far. - pub size: u64, -} - -/// Syncing status and statistics. -#[derive(Debug, Clone)] -pub struct SyncStatus { - /// Current global sync state. - pub state: SyncState>, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_peers: u32, - /// Number of peers known to `SyncingEngine` (both full and light). - pub num_connected_peers: u32, - /// Number of blocks queued for import - pub queued_blocks: u32, - /// State sync status in progress, if any. - pub state_sync: Option, - /// Warp sync in progress, if any. - pub warp_sync: Option>, -} - -/// A peer did not behave as expected and should be reported. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct BadPeer(pub PeerId, pub ReputationChange); - -impl fmt::Display for BadPeer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) - } -} - -impl std::error::Error for BadPeer {} - -/// Action that the parent of [`ChainSync`] should perform if we want to import blocks. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ImportBlocksAction { - pub origin: BlockOrigin, - pub blocks: Vec>, -} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockData { - /// The block should be imported. - Import(ImportBlocksAction), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), - /// Continue processing events. - Continue, -} - -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. 
- Import { - peer_id: PeerId, - hash: Block::Hash, - number: NumberFor, - justifications: Justifications, - }, -} - -/// Result of `ChainSync::on_state_data`. -#[derive(Debug)] -pub enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Block or justification request polled from `ChainSync` -#[derive(Debug)] -pub enum ImportResult { - BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), -} /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] @@ -197,201 +53,3 @@ impl Default for SyncMode { Self::Full } } -#[derive(Debug)] -pub struct Metrics { - pub queued_blocks: u32, - pub fork_targets: u32, - pub justifications: metrics::Metrics, -} - -#[derive(Debug)] -pub enum PeerRequest { - Block(BlockRequest), - State, - WarpProof, -} - -#[derive(Debug)] -pub enum PeerRequestType { - Block, - State, - WarpProof, -} - -impl PeerRequest { - pub fn get_type(&self) -> PeerRequestType { - match self { - PeerRequest::Block(_) => PeerRequestType::Block, - PeerRequest::State => PeerRequestType::State, - PeerRequest::WarpProof => PeerRequestType::WarpProof, - } - } -} - -/// Wrapper for implementation-specific state request. -/// -/// NOTE: Implementation must be able to encode and decode it for network purposes. -pub struct OpaqueStateRequest(pub Box); - -impl fmt::Debug for OpaqueStateRequest { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OpaqueStateRequest").finish() - } -} - -/// Wrapper for implementation-specific state response. -/// -/// NOTE: Implementation must be able to encode and decode it for network purposes. -pub struct OpaqueStateResponse(pub Box); - -impl fmt::Debug for OpaqueStateResponse { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OpaqueStateResponse").finish() - } -} - -/// Provides high-level status of syncing. -#[async_trait::async_trait] -pub trait SyncStatusProvider: Send + Sync { - /// Get high-level view of the syncing status. - async fn status(&self) -> Result, ()>; -} - -#[async_trait::async_trait] -impl SyncStatusProvider for Arc -where - T: ?Sized, - T: SyncStatusProvider, - Block: BlockT, -{ - async fn status(&self) -> Result, ()> { - T::status(self).await - } -} - -/// Syncing-related events that other protocols can subscribe to. -pub enum SyncEvent { - /// Peer that the syncing implementation is tracking connected. - PeerConnected(PeerId), - - /// Peer that the syncing implementation was tracking disconnected. - PeerDisconnected(PeerId), -} - -pub trait SyncEventStream: Send + Sync { - /// Subscribe to syncing-related events. - fn event_stream(&self, name: &'static str) -> Pin + Send>>; -} - -impl SyncEventStream for Arc -where - T: ?Sized, - T: SyncEventStream, -{ - fn event_stream(&self, name: &'static str) -> Pin + Send>> { - T::event_stream(self, name) - } -} - -/// Something that represents the syncing strategy to download past and future blocks of the chain. -pub trait ChainSync: Send { - /// Returns the state of the sync of the given peer. - /// - /// Returns `None` if the peer is unknown. - fn peer_info(&self, who: &PeerId) -> Option>; - - /// Returns the current sync status. - fn status(&self) -> SyncStatus; - - /// Number of active forks requests. This includes - /// requests that are pending or could be issued right away. - fn num_sync_requests(&self) -> usize; - - /// Number of downloaded blocks. 
- fn num_downloaded_blocks(&self) -> usize; - - /// Returns the current number of peers stored within this state machine. - fn num_peers(&self) -> usize; - - /// Handle a new connected peer. - /// - /// Call this method whenever we connect to a new peer. - #[must_use] - fn new_peer( - &mut self, - who: PeerId, - best_hash: Block::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer>; - - /// Signal that a new best block has been imported. - fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); - - /// Schedule a justification request for the given block. - fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); - - /// Clear all pending justification requests. - fn clear_justification_requests(&mut self); - - /// Request syncing for the given block from given set of peers. - fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &Block::Hash, - number: NumberFor, - ); - - /// Handle a response from the remote to a block request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// or `None` if data comes from the block announcement. - /// - /// If this corresponds to a valid block, this outputs the block that - /// must be imported in the import queue. - #[must_use] - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer>; - - /// Handle a response from the remote to a justification request that we made. - /// - /// `request` must be the original request that triggered `response`. - #[must_use] - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer>; - - /// Call this when a justification has been processed by the import queue, - /// with or without errors. - fn on_justification_import( - &mut self, - hash: Block::Hash, - number: NumberFor, - success: bool, - ); - - /// Notify about finalization of the given block. - fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - - /// Notify about pre-validated block announcement. - fn on_validated_block_announce( - &mut self, - is_best: bool, - who: PeerId, - announce: &BlockAnnounce, - ); - - /// Call when a peer has disconnected. - /// Canceled obsolete block request may result in some blocks being ready for - /// import, so this functions checks for such blocks and returns them. - #[must_use] - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; - - /// Return some key metrics. - fn metrics(&self) -> Metrics; -} diff --git a/substrate/client/network/common/src/sync/warp.rs b/substrate/client/network/common/src/sync/warp.rs deleted file mode 100644 index f4e39f438512..000000000000 --- a/substrate/client/network/common/src/sync/warp.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use codec::{Decode, Encode}; -pub use sp_consensus_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::fmt; - -/// Scale-encoded warp sync proof response. -pub struct EncodedProof(pub Vec); - -/// Warp sync request -#[derive(Encode, Decode, Debug)] -pub struct WarpProofRequest { - /// Start collecting proofs from this block. - pub begin: B::Hash, -} - -/// Proof verification result. -pub enum VerificationResult { - /// Proof is valid, but the target was not reached. - Partial(SetId, AuthorityList, Block::Hash), - /// Target finality is proved. - Complete(SetId, AuthorityList, Block::Header), -} - -/// Warp sync backend. Handles retrieving and verifying warp sync proofs. -pub trait WarpSyncProvider: Send + Sync { - /// Generate proof starting at given block hash. The proof is accumulated until maximum proof - /// size is reached. - fn generate( - &self, - start: Block::Hash, - ) -> Result>; - /// Verify warp proof against current set of authorities. - fn verify( - &self, - proof: &EncodedProof, - set_id: SetId, - authorities: AuthorityList, - ) -> Result, Box>; - /// Get current list of authorities. This is supposed to be genesis authorities when starting - /// sync. - fn current_authorities(&self) -> AuthorityList; -} - -/// Reported warp sync phase. -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum WarpSyncPhase { - /// Waiting for peers to connect. - AwaitingPeers { required_peers: usize }, - /// Waiting for target block to be received. - AwaitingTargetBlock, - /// Downloading and verifying grandpa warp proofs. - DownloadingWarpProofs, - /// Downloading target block. - DownloadingTargetBlock, - /// Downloading state data. - DownloadingState, - /// Importing state. - ImportingState, - /// Downloading block history. - DownloadingBlocks(NumberFor), -} - -impl fmt::Display for WarpSyncPhase { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::AwaitingPeers { required_peers } => - write!(f, "Waiting for {required_peers} peers to be connected"), - Self::AwaitingTargetBlock => write!(f, "Waiting for target block to be received"), - Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), - Self::DownloadingTargetBlock => write!(f, "Downloading target block"), - Self::DownloadingState => write!(f, "Downloading state"), - Self::ImportingState => write!(f, "Importing state"), - Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), - } - } -} - -/// Reported warp sync progress. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct WarpSyncProgress { - /// Estimated download percentage. - pub phase: WarpSyncPhase, - /// Total bytes downloaded so far. 
- pub total_bytes: u64, -} diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index d069c3f458ff..124d73a74dbc 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -30,7 +30,11 @@ pub use crate::{ types::ProtocolName, }; -pub use libp2p::{identity::Keypair, multiaddr, Multiaddr, PeerId}; +pub use libp2p::{ + build_multiaddr, + identity::{self, ed25519, Keypair}, + multiaddr, Multiaddr, PeerId, +}; use crate::peer_store::PeerStoreHandle; use codec::Encode; @@ -39,9 +43,10 @@ use zeroize::Zeroize; pub use sc_network_common::{ role::{Role, Roles}, - sync::{warp::WarpSyncProvider, SyncMode}, + sync::SyncMode, ExHashT, }; + use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::Block as BlockT; @@ -58,11 +63,6 @@ use std::{ str::{self, FromStr}, }; -pub use libp2p::{ - build_multiaddr, - identity::{self, ed25519}, -}; - /// Protocol name prefix, transmitted on the wire for legacy protocol names. /// I.e., `dot` in `/dot/sync/2`. Should be unique for each chain. Always UTF-8. /// Deprecated in favour of genesis hash & fork ID based protocol names. diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index ee3075968784..4dc9bdb4cc1c 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -266,14 +266,7 @@ pub use event::{DhtEvent, Event, SyncEvent}; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use request_responses::{Config, IfDisconnected, RequestFailure}; -pub use sc_network_common::{ - role::ObservedRole, - sync::{ - warp::{WarpSyncPhase, WarpSyncProgress}, - ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider, - }, - types::ReputationChange, -}; +pub use sc_network_common::{role::ObservedRole, types::ReputationChange}; pub use service::{ signature::Signature, traits::{ diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index adfb2d6a05fb..ef974b4f33f1 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -21,6 +21,7 @@ libp2p = "0.51.3" log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-network-common = { path = "../common" } +sc-network-sync = { path = "../sync" } sc-network = { path = ".." 
} sp-consensus = { path = "../../../primitives/consensus/common" } sp-statement-store = { path = "../../../primitives/statement-store" } diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs index c5d83b59b260..69d4faa13ef2 100644 --- a/substrate/client/network/statement/src/lib.rs +++ b/substrate/client/network/statement/src/lib.rs @@ -39,10 +39,8 @@ use sc_network::{ utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, }; -use sc_network_common::{ - role::ObservedRole, - sync::{SyncEvent, SyncEventStream}, -}; +use sc_network_common::role::ObservedRole; +use sc_network_sync::{SyncEvent, SyncEventStream}; use sp_statement_store::{ Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult, }; diff --git a/substrate/client/network/sync/src/block_request_handler.rs b/substrate/client/network/sync/src/block_request_handler.rs index c24083f63287..f363dda3a2d1 100644 --- a/substrate/client/network/sync/src/block_request_handler.rs +++ b/substrate/client/network/sync/src/block_request_handler.rs @@ -24,7 +24,6 @@ use crate::{ BlockResponse as BlockResponseSchema, BlockResponse, Direction, }, service::network::NetworkServiceHandle, - MAX_BLOCKS_IN_RESPONSE, }; use codec::{Decode, DecodeAll, Encode}; @@ -54,6 +53,9 @@ use std::{ time::Duration, }; +/// Maximum blocks per response. +pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; + const LOG_TARGET: &str = "sync"; const MAX_BODY_BYTES: usize = 8 * 1024 * 1024; const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs new file mode 100644 index 000000000000..9cf0080e36ac --- /dev/null +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -0,0 +1,2435 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Contains the state of the chain synchronization process +//! +//! At any given point in time, a running node tries as much as possible to be at the head of the +//! chain. This module handles the logic of which blocks to request from remotes, and processing +//! responses. It yields blocks to check and potentially move to the database. +//! +//! # Usage +//! +//! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on +//! the network, or whenever a block has been successfully verified, call the appropriate method in +//! order to update it. 
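+//!
+//! A rough sketch of the intended call pattern, for illustration only (`client`,
+//! `protocol_name`, `network_handle`, `import_queue` and the peer/request/response
+//! values are assumed to be supplied by the caller):
+//!
+//! ```ignore
+//! let mut chain_sync = ChainSync::new(
+//!     SyncMode::Full,
+//!     client,
+//!     protocol_name,
+//!     1,    // max_parallel_downloads
+//!     64,   // max_blocks_per_request
+//!     None, // no warp sync config
+//!     network_handle,
+//! )?;
+//!
+//! // A new sync peer connected; this may yield an initial block request.
+//! let _maybe_request = chain_sync.new_peer(peer_id, best_hash, best_number)?;
+//!
+//! // A block response arrived; forward any resulting blocks to the import queue.
+//! if let OnBlockResponse::ImportBlocks(ImportBlocksAction { origin, blocks }) =
+//!     chain_sync.on_block_response(peer_id, request, response_blocks)
+//! {
+//!     import_queue.import_blocks(origin, blocks);
+//! }
+//! ```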
+ +use crate::{ + blocks::BlockCollection, + extra_requests::ExtraRequests, + schema::v1::StateResponse, + service::network::NetworkServiceHandle, + state::{ImportResult, StateSync}, + types::{ + BadPeer, Metrics, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, SyncState, + SyncStatus, + }, + warp::{ + self, EncodedProof, WarpProofImportResult, WarpProofRequest, WarpSync, WarpSyncConfig, + WarpSyncPhase, WarpSyncProgress, + }, +}; + +use codec::Encode; +use libp2p::PeerId; +use log::{debug, error, info, trace, warn}; + +use sc_client_api::{BlockBackend, ProofProvider}; +use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sc_network::types::ProtocolName; +use sc_network_common::sync::message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock, +}; +use sp_arithmetic::traits::Saturating; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{BlockOrigin, BlockStatus}; +use sp_runtime::{ + traits::{ + Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, + SaturatedConversion, Zero, + }, + EncodedJustification, Justifications, +}; + +use std::{ + collections::{HashMap, HashSet}, + ops::Range, + sync::Arc, +}; + +#[cfg(test)] +mod test; + +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + +/// Maximum blocks to store in the import queue. +const MAX_IMPORTING_BLOCKS: usize = 2048; + +/// Maximum blocks to download ahead of any gap. +const MAX_DOWNLOAD_AHEAD: u32 = 2048; + +/// Maximum blocks to look backwards. The gap is the difference between the highest block and the +/// common block of a node. +const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; + +/// Pick the state to sync as the latest finalized number minus this. +const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; + +/// We use a heuristic that with a high likelihood, by the time +/// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same +/// chain as (or at least closer to) the peer so we want to delay +/// the ancestor search to not waste time doing that when we are +/// so far behind. +const MAJOR_SYNC_BLOCKS: u8 = 5; + +/// Number of peers that need to be connected before warp sync is started. +const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; + +mod rep { + use sc_network::ReputationChange as Rep; + /// Reputation change when a peer sent us a message that led to a + /// database read error. + pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Reputation change when a peer sent us a status message with a different + /// genesis than us. + pub const GENESIS_MISMATCH: Rep = Rep::new(i32::MIN, "Genesis mismatch"); + + /// Reputation change for peers which send us a block with an incomplete header. + pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); + + /// Reputation change for peers which send us a block which we fail to verify. + pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); + + /// Reputation change for peers which send us a known bad block. + pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); + + /// Peer did not provide us with advertised block data. + pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); + + /// Reputation change for peers which send us non-requested block data. 
+ pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data");
+
+ /// Reputation change for peers which send us a block with bad justifications.
+ pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification");
+
+ /// Reputation change when a peer sent us an invalid ancestry result.
+ pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error");
+
+ /// Peer response data does not have requested bits.
+ pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response");
+}
+
+enum AllowedRequests {
+ Some(HashSet<PeerId>),
+ All,
+}
+
+impl AllowedRequests {
+ fn add(&mut self, id: &PeerId) {
+ if let Self::Some(ref mut set) = self {
+ set.insert(*id);
+ }
+ }
+
+ fn take(&mut self) -> Self {
+ std::mem::take(self)
+ }
+
+ fn set_all(&mut self) {
+ *self = Self::All;
+ }
+
+ fn contains(&self, id: &PeerId) -> bool {
+ match self {
+ Self::Some(set) => set.contains(id),
+ Self::All => true,
+ }
+ }
+
+ fn is_empty(&self) -> bool {
+ match self {
+ Self::Some(set) => set.is_empty(),
+ Self::All => false,
+ }
+ }
+
+ fn clear(&mut self) {
+ std::mem::take(self);
+ }
+}
+
+impl Default for AllowedRequests {
+ fn default() -> Self {
+ Self::Some(HashSet::default())
+ }
+}
+
+struct GapSync<B: BlockT> {
+ blocks: BlockCollection<B>,
+ best_queued_number: NumberFor<B>,
+ target: NumberFor<B>,
+}
+
+/// Action that the parent of [`ChainSync`] should perform after reporting imported blocks with
+/// [`ChainSync::on_blocks_processed`].
+pub enum BlockRequestAction<B: BlockT> {
+ /// Send block request to peer. Always implies dropping a stale block request to the same peer.
+ SendRequest { peer_id: PeerId, request: BlockRequest<B> },
+ /// Drop stale block request.
+ RemoveStale { peer_id: PeerId },
+}
+
+/// Action that the parent of [`ChainSync`] should perform if we want to import blocks.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ImportBlocksAction<B: BlockT> {
+ pub origin: BlockOrigin,
+ pub blocks: Vec<IncomingBlock<B>>,
+}
+
+/// Action that the parent of [`ChainSync`] should perform if we want to import justifications.
+pub struct ImportJustificationsAction<B: BlockT> {
+ pub peer_id: PeerId,
+ pub hash: B::Hash,
+ pub number: NumberFor<B>,
+ pub justifications: Justifications,
+}
+
+/// Result of [`ChainSync::on_block_data`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OnBlockData<Block: BlockT> {
+ /// The block should be imported.
+ Import(ImportBlocksAction<Block>),
+ /// A new block request needs to be made to the given peer.
+ Request(PeerId, BlockRequest<Block>),
+ /// Continue processing events.
+ Continue,
+}
+
+/// Result of [`ChainSync::on_block_justification`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OnBlockJustification<Block: BlockT> {
+ /// The justification needs no further handling.
+ Nothing,
+ /// The justification should be imported.
+ Import {
+ peer_id: PeerId,
+ hash: Block::Hash,
+ number: NumberFor<Block>,
+ justifications: Justifications,
+ },
+}
+
+/// Result of [`ChainSync::on_state_data`].
+#[derive(Debug)]
+pub enum OnStateData<Block: BlockT> {
+ /// The block and state that should be imported.
+ Import(BlockOrigin, IncomingBlock<Block>),
+ /// A new state request needs to be made to the given peer.
+ Continue,
+}
+
+/// Action that the parent of [`ChainSync`] should perform after reporting a block response with
+/// [`ChainSync::on_block_response`].
+pub enum OnBlockResponse<B: BlockT> {
+ /// Nothing to do.
+ Nothing,
+ /// Perform block request.
+ SendBlockRequest { peer_id: PeerId, request: BlockRequest<B> },
+ /// Import blocks.
+ ImportBlocks(ImportBlocksAction<B>),
+ /// Import justifications.
+ ImportJustifications(ImportJustificationsAction<B>),
+}
+
+/// The main data structure which contains all the state for a chain's
+/// active syncing strategy.
+pub struct ChainSync<B: BlockT, Client> {
+ /// Chain client.
+ client: Arc<Client>,
+ /// The active peers that we are using to sync and their PeerSync status
+ peers: HashMap<PeerId, PeerSync<B>>,
+ /// A `BlockCollection` of blocks that are being downloaded from peers
+ blocks: BlockCollection<B>,
+ /// The best block number in our queue of blocks to import
+ best_queued_number: NumberFor<B>,
+ /// The best block hash in our queue of blocks to import
+ best_queued_hash: B::Hash,
+ /// Current mode (full/light)
+ mode: SyncMode,
+ /// Any extra justification requests.
+ extra_justifications: ExtraRequests<B>,
+ /// A set of hashes of blocks that are being downloaded or have been
+ /// downloaded and are queued for import.
+ queue_blocks: HashSet<B::Hash>,
+ /// Fork sync targets.
+ fork_targets: HashMap<B::Hash, ForkTarget<B>>,
+ /// A set of peers for which there might be potential block requests
+ allowed_requests: AllowedRequests,
+ /// Maximum number of peers to ask the same blocks in parallel.
+ max_parallel_downloads: u32,
+ /// Maximum blocks per request.
+ max_blocks_per_request: u32,
+ /// Total number of downloaded blocks.
+ downloaded_blocks: usize,
+ /// State sync in progress, if any.
+ state_sync: Option<StateSync<B, Client>>,
+ /// Warp sync in progress, if any.
+ warp_sync: Option<WarpSync<B, Client>>,
+ /// Warp sync configuration.
+ ///
+ /// Will be `None` after `self.warp_sync` is `Some(_)`.
+ warp_sync_config: Option<WarpSyncConfig<B>>,
+ /// A temporary storage for warp sync target block until warp sync is initialized.
+ warp_sync_target_block_header: Option<B::Header>,
+ /// Enable importing existing blocks. This is used after the state download to
+ /// catch up to the latest state while re-importing blocks.
+ import_existing: bool,
+ /// Gap download process.
+ gap_sync: Option<GapSync<B>>,
+ /// Handle for communicating with `NetworkService`
+ network_service: NetworkServiceHandle,
+ /// Protocol name used for block announcements
+ block_announce_protocol_name: ProtocolName,
+}
+
+/// All the data we have about a Peer that we are trying to sync with.
+#[derive(Debug, Clone)]
+pub(crate) struct PeerSync<B: BlockT> {
+ /// Peer id of this peer.
+ pub peer_id: PeerId,
+ /// The common number is the block number that is a common point of
+ /// ancestry for both our chains (as far as we know).
+ pub common_number: NumberFor<B>,
+ /// The hash of the best block that we've seen for this peer.
+ pub best_hash: B::Hash,
+ /// The number of the best block that we've seen for this peer.
+ pub best_number: NumberFor<B>,
+ /// The state of syncing this peer is in for us, generally categorized
+ /// into `Available` or "busy" with something as defined by `PeerSyncState`.
+ pub state: PeerSyncState<B>,
+}
+
+impl<B: BlockT> PeerSync<B> {
+ /// Update the `common_number` iff `new_common > common_number`.
+ fn update_common_number(&mut self, new_common: NumberFor<B>) {
+ if self.common_number < new_common {
+ trace!(
+ target: LOG_TARGET,
+ "Updating peer {} common number from={} => to={}.",
+ self.peer_id,
+ self.common_number,
+ new_common,
+ );
+ self.common_number = new_common;
+ }
+ }
+}
+
+struct ForkTarget<B: BlockT> {
+ number: NumberFor<B>,
+ parent_hash: Option<B::Hash>,
+ peers: HashSet<PeerId>,
+}
+
+/// The state of syncing between a Peer and ourselves.
+///
+/// Generally two categories, "busy" or `Available`. If busy, the enum
+/// defines what we are busy with.
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub(crate) enum PeerSyncState<B: BlockT> {
+ /// Available for sync requests.
+ Available, + /// Searching for ancestors the Peer has in common with us. + AncestorSearch { start: NumberFor, current: NumberFor, state: AncestorSearchState }, + /// Actively downloading new blocks, starting from the given Number. + DownloadingNew(NumberFor), + /// Downloading a stale block with given Hash. Stale means that it is a + /// block with a number that is lower than our best number. It might be + /// from a fork and not necessarily already imported. + DownloadingStale(B::Hash), + /// Downloading justification for given block hash. + DownloadingJustification(B::Hash), + /// Downloading state. + DownloadingState, + /// Downloading warp proof. + DownloadingWarpProof, + /// Downloading warp sync target block. + DownloadingWarpTargetBlock, + /// Actively downloading block history after warp sync. + DownloadingGap(NumberFor), +} + +impl PeerSyncState { + pub fn is_available(&self) -> bool { + matches!(self, Self::Available) + } +} + +impl ChainSync +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + /// Create a new instance. + pub fn new( + mode: SyncMode, + client: Arc, + block_announce_protocol_name: ProtocolName, + max_parallel_downloads: u32, + max_blocks_per_request: u32, + warp_sync_config: Option>, + network_service: NetworkServiceHandle, + ) -> Result { + let mut sync = Self { + client, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), + extra_justifications: ExtraRequests::new("justification"), + mode, + queue_blocks: Default::default(), + fork_targets: Default::default(), + allowed_requests: Default::default(), + max_parallel_downloads, + max_blocks_per_request, + downloaded_blocks: 0, + state_sync: None, + warp_sync: None, + import_existing: false, + gap_sync: None, + network_service, + warp_sync_config, + warp_sync_target_block_header: None, + block_announce_protocol_name, + }; + + sync.reset_sync_start_point()?; + Ok(sync) + } + + /// Get peer's best hash & number. + pub fn peer_info(&self, who: &PeerId) -> Option> { + self.peers + .get(who) + .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) + } + + /// Returns the current sync status. + pub fn status(&self) -> SyncStatus { + let median_seen = self.median_seen(); + let best_seen_block = + median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); + let sync_state = if let Some(target) = median_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing + // if the same can be said about queued blocks. + let best_block = self.client.info().best_number; + if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { + // If target is not queued, we're downloading, otherwise importing. 
+ if target > self.best_queued_number { + SyncState::Downloading { target } + } else { + SyncState::Importing { target } + } + } else { + SyncState::Idle + } + } else { + SyncState::Idle + }; + + let warp_sync_progress = match (&self.warp_sync, &self.mode, &self.gap_sync) { + (_, _, Some(gap_sync)) => Some(WarpSyncProgress { + phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), + total_bytes: 0, + }), + (None, SyncMode::Warp, _) => Some(WarpSyncProgress { + phase: WarpSyncPhase::AwaitingPeers { + required_peers: MIN_PEERS_TO_START_WARP_SYNC, + }, + total_bytes: 0, + }), + (Some(sync), _, _) => Some(sync.progress()), + _ => None, + }; + + SyncStatus { + state: sync_state, + best_seen_block, + num_peers: self.peers.len() as u32, + num_connected_peers: 0u32, + queued_blocks: self.queue_blocks.len() as u32, + state_sync: self.state_sync.as_ref().map(|s| s.progress()), + warp_sync: warp_sync_progress, + } + } + + /// Get an estimate of the number of parallel sync requests. + pub fn num_sync_requests(&self) -> usize { + self.fork_targets + .values() + .filter(|f| f.number <= self.best_queued_number) + .count() + } + + /// Get the total number of downloaded blocks. + pub fn num_downloaded_blocks(&self) -> usize { + self.downloaded_blocks + } + + /// Get the number of peers known to the syncing state machine. + pub fn num_peers(&self) -> usize { + self.peers.len() + } + + /// Notify syncing state machine that a new sync peer has connected. + #[must_use] + pub fn new_peer( + &mut self, + who: PeerId, + best_hash: B::Hash, + best_number: NumberFor, + ) -> Result>, BadPeer> { + // There is nothing sync can get from the node that has no blockchain data. + match self.block_status(&best_hash) { + Err(e) => { + debug!(target:LOG_TARGET, "Error reading blockchain: {e}"); + Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) + }, + Ok(BlockStatus::KnownBad) => { + info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); + Err(BadPeer(who, rep::BAD_BLOCK)) + }, + Ok(BlockStatus::Unknown) => { + if best_number.is_zero() { + info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)) + } + + // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have + // enough to do in the import queue that it's not worth kicking off + // an ancestor search, which is what we do in the next match case below. + if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { + debug!( + target:LOG_TARGET, + "New peer with unknown best hash {} ({}), assuming common block.", + self.best_queued_hash, + self.best_queued_number + ); + self.peers.insert( + who, + PeerSync { + peer_id: who, + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); + return Ok(None) + } + + // If we are at genesis, just start downloading. 
+ let (state, req) = if self.best_queued_number.is_zero() { + debug!( + target:LOG_TARGET, + "New peer with best hash {best_hash} ({best_number}).", + ); + + (PeerSyncState::Available, None) + } else { + let common_best = std::cmp::min(self.best_queued_number, best_number); + + debug!( + target:LOG_TARGET, + "New peer with unknown best hash {} ({}), searching for common ancestor.", + best_hash, + best_number + ); + + ( + PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + Some(ancestry_request::(common_best)), + ) + }; + + self.allowed_requests.add(&who); + self.peers.insert( + who, + PeerSync { + peer_id: who, + common_number: Zero::zero(), + best_hash, + best_number, + state, + }, + ); + + if let SyncMode::Warp = self.mode { + if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() + { + log::debug!(target: LOG_TARGET, "Starting warp state sync."); + + if let Some(config) = self.warp_sync_config.take() { + let mut warp_sync = WarpSync::new(self.client.clone(), config); + if let Some(header) = self.warp_sync_target_block_header.take() { + warp_sync.set_target_block(header); + } + self.warp_sync = Some(warp_sync); + } + } + } + Ok(req) + }, + Ok(BlockStatus::Queued) | + Ok(BlockStatus::InChainWithState) | + Ok(BlockStatus::InChainPruned) => { + debug!( + target: LOG_TARGET, + "New peer with known best hash {best_hash} ({best_number}).", + ); + self.peers.insert( + who, + PeerSync { + peer_id: who, + common_number: std::cmp::min(self.best_queued_number, best_number), + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); + self.allowed_requests.add(&who); + Ok(None) + }, + } + } + + /// Inform sync about a new best imported block. + pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + self.on_block_queued(best_hash, best_number); + } + + /// Request extra justification. + pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_justifications + .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) + } + + /// Clear extra justification requests. + pub fn clear_justification_requests(&mut self) { + self.extra_justifications.reset(); + } + + /// Configure an explicit fork sync request in case external code has detected that there is a + /// stale fork missing. + /// + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// + /// Passing empty `peers` set effectively removes the sync request. + // The implementation is similar to `on_validated_block_announce` with unknown parent hash. + pub fn set_sync_fork_request( + &mut self, + mut peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + if peers.is_empty() { + peers = self + .peers + .iter() + // Only request blocks from peers who are ahead or on a par. + .filter(|(_, peer)| peer.best_number >= number) + .map(|(id, _)| *id) + .collect(); + + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with no peers specified. 
\ + Syncing from these peers {peers:?} instead.", + ); + } else { + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with {peers:?}", + ); + } + + if self.is_known(hash) { + debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); + return + } + + trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); + for peer_id in &peers { + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + continue + } + + if number > peer.best_number { + peer.best_number = number; + peer.best_hash = *hash; + } + self.allowed_requests.add(peer_id); + } + } + + self.fork_targets + .entry(*hash) + .or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None }) + .peers + .extend(peers); + } + + /// Submit a block response for processing. + #[must_use] + pub fn on_block_data( + &mut self, + who: &PeerId, + request: Option>, + response: BlockResponse, + ) -> Result, BadPeer> { + self.downloaded_blocks += response.blocks.len(); + let mut gap = false; + let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { + let mut blocks = response.blocks; + if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { + trace!(target: LOG_TARGET, "Reversing incoming block list"); + blocks.reverse() + } + self.allowed_requests.add(who); + if let Some(request) = request { + match &mut peer.state { + PeerSyncState::DownloadingNew(_) => { + self.blocks.clear_peer_download(who); + peer.state = PeerSyncState::Available; + if let Some(start_block) = + validate_blocks::(&blocks, who, Some(request))? + { + self.blocks.insert(start_block, blocks, *who); + } + self.ready_blocks() + }, + PeerSyncState::DownloadingGap(_) => { + peer.state = PeerSyncState::Available; + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(who); + if let Some(start_block) = + validate_blocks::(&blocks, who, Some(request))? 
+ { + gap_sync.blocks.insert(start_block, blocks, *who); + } + gap = true; + let blocks: Vec<_> = gap_sync + .blocks + .ready_blocks(gap_sync.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = + block_data.block.justifications.or_else(|| { + legacy_justification_mapping( + block_data.block.justification, + ) + }); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + indexed_body: block_data.block.indexed_body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: true, + state: None, + } + }) + .collect(); + debug!( + target: LOG_TARGET, + "Drained {} gap blocks from {}", + blocks.len(), + gap_sync.best_queued_number, + ); + blocks + } else { + debug!(target: LOG_TARGET, "Unexpected gap block response from {who}"); + return Err(BadPeer(*who, rep::NO_BLOCK)) + } + }, + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + if blocks.is_empty() { + debug!(target: LOG_TARGET, "Empty block response from {who}"); + return Err(BadPeer(*who, rep::NO_BLOCK)) + } + validate_blocks::(&blocks, who, Some(request))?; + blocks + .into_iter() + .map(|b| { + let justifications = b + .justifications + .or_else(|| legacy_justification_mapping(b.justification)); + IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + indexed_body: None, + justifications, + origin: Some(*who), + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, + } + }) + .collect() + }, + PeerSyncState::AncestorSearch { current, start, state } => { + let matching_hash = match (blocks.get(0), self.client.hash(*current)) { + (Some(block), Ok(maybe_our_block_hash)) => { + trace!( + target: LOG_TARGET, + "Got ancestry block #{} ({}) from peer {}", + current, + block.hash, + who, + ); + maybe_our_block_hash.filter(|x| x == &block.hash) + }, + (None, _) => { + debug!( + target: LOG_TARGET, + "Invalid response when searching for ancestor from {who}", + ); + return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) + }, + (_, Err(e)) => { + info!( + target: LOG_TARGET, + "❌ Error answering legitimate blockchain query: {e}", + ); + return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) + }, + }; + if matching_hash.is_some() { + if *start < self.best_queued_number && + self.best_queued_number <= peer.best_number + { + // We've made progress on this chain since the search was started. + // Opportunistically set common number to updated number + // instead of the one that started the search. + peer.common_number = self.best_queued_number; + } else if peer.common_number < *current { + peer.common_number = *current; + } + } + if matching_hash.is_none() && current.is_zero() { + trace!( + target:LOG_TARGET, + "Ancestry search: genesis mismatch for peer {who}", + ); + return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) + } + if let Some((next_state, next_num)) = + handle_ancestor_search_state(state, *current, matching_hash.is_some()) + { + peer.state = PeerSyncState::AncestorSearch { + current: next_num, + start: *start, + state: next_state, + }; + return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))) + } else { + // Ancestry search is complete. Check if peer is on a stale fork unknown + // to us and add it to sync targets if necessary. + trace!( + target: LOG_TARGET, + "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + self.best_queued_hash, + self.best_queued_number, + peer.best_hash, + peer.best_number, + matching_hash, + peer.common_number, + ); + if peer.common_number < peer.best_number && + peer.best_number < self.best_queued_number + { + trace!( + target: LOG_TARGET, + "Added fork target {} for {}", + peer.best_hash, + who, + ); + self.fork_targets + .entry(peer.best_hash) + .or_insert_with(|| ForkTarget { + number: peer.best_number, + parent_hash: None, + peers: Default::default(), + }) + .peers + .insert(*who); + } + peer.state = PeerSyncState::Available; + Vec::new() + } + }, + PeerSyncState::DownloadingWarpTargetBlock => { + peer.state = PeerSyncState::Available; + if let Some(warp_sync) = &mut self.warp_sync { + if blocks.len() == 1 { + validate_blocks::(&blocks, who, Some(request))?; + match warp_sync.import_target_block( + blocks.pop().expect("`blocks` len checked above."), + ) { + warp::TargetBlockImportResult::Success => + return Ok(OnBlockData::Continue), + warp::TargetBlockImportResult::BadResponse => + return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), + } + } else if blocks.is_empty() { + debug!(target: LOG_TARGET, "Empty block response from {who}"); + return Err(BadPeer(*who, rep::NO_BLOCK)) + } else { + debug!( + target: LOG_TARGET, + "Too many blocks ({}) in warp target block response from {}", + blocks.len(), + who, + ); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + } + } else { + debug!( + target: LOG_TARGET, + "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", + who, + ); + return Ok(OnBlockData::Continue) + } + }, + PeerSyncState::Available | + PeerSyncState::DownloadingJustification(..) | + PeerSyncState::DownloadingState | + PeerSyncState::DownloadingWarpProof => Vec::new(), + } + } else { + // When request.is_none() this is a block announcement. Just accept blocks. + validate_blocks::(&blocks, who, None)?; + blocks + .into_iter() + .map(|b| { + let justifications = b + .justifications + .or_else(|| legacy_justification_mapping(b.justification)); + IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + indexed_body: None, + justifications, + origin: Some(*who), + allow_missing_state: true, + import_existing: false, + skip_execution: true, + state: None, + } + }) + .collect() + } + } else { + // We don't know of this peer, so we also did not request anything from it. + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + }; + + Ok(OnBlockData::Import(self.validate_and_queue_blocks(new_blocks, gap))) + } + + /// Submit a justification response for processing. 
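+ ///
+ /// A sketch of how a caller might dispatch the result; `import_queue` is an
+ /// assumed handle to the import queue:
+ ///
+ /// ```ignore
+ /// match chain_sync.on_block_justification(peer_id, response)? {
+ ///     OnBlockJustification::Import { peer_id, hash, number, justifications } =>
+ ///         import_queue.import_justifications(peer_id, hash, number, justifications),
+ ///     OnBlockJustification::Nothing => {},
+ /// }
+ /// ```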
+ #[must_use] + pub fn on_block_justification( + &mut self, + who: PeerId, + response: BlockResponse, + ) -> Result, BadPeer> { + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!( + target: LOG_TARGET, + "💔 Called on_block_justification with a peer ID of an unknown peer", + ); + return Ok(OnBlockJustification::Nothing) + }; + + self.allowed_requests.add(&who); + if let PeerSyncState::DownloadingJustification(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // We only request one justification at a time + let justification = if let Some(block) = response.blocks.into_iter().next() { + if hash != block.hash { + warn!( + target: LOG_TARGET, + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", + who, + hash, + block.hash, + ); + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) + } + + block + .justifications + .or_else(|| legacy_justification_mapping(block.justification)) + } else { + // we might have asked the peer for a justification on a block that we assumed it + // had but didn't (regardless of whether it had a justification for it or not). + trace!( + target: LOG_TARGET, + "Peer {who:?} provided empty response for justification request {hash:?}", + ); + + None + }; + + if let Some((peer_id, hash, number, justifications)) = + self.extra_justifications.on_response(who, justification) + { + return Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) + } + } + + Ok(OnBlockJustification::Nothing) + } + + /// Report a justification import (successful or not). + pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); + self.allowed_requests.set_all(); + } + + /// Notify sync that a block has been finalized. + pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let SyncMode::LightState { skip_proofs, .. } = &self.mode { + if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { + // Finalized a recent block. + let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(*hash) { + log::debug!( + target: LOG_TARGET, + "Starting state sync for #{number} ({hash})", + ); + self.state_sync = Some(StateSync::new( + self.client.clone(), + header, + None, + None, + *skip_proofs, + )); + self.allowed_requests.set_all(); + } + } + } + } + + if let Err(err) = r { + warn!( + target: LOG_TARGET, + "💔 Error cleaning up pending extra justification data requests: {err}", + ); + } + } + + /// Submit a validated block announcement. 
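+ ///
+ /// Updates our view of the announcing peer (its best hash and number and, where
+ /// possible, the common-ancestor number) and may register the announced block as
+ /// a fork sync target if we are idle.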
+ pub fn on_validated_block_announce( + &mut self, + is_best: bool, + who: PeerId, + announce: &BlockAnnounce, + ) { + let number = *announce.header.number(); + let hash = announce.header.hash(); + let parent_status = + self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; + + let known = self.is_known(&hash); + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID"); + return + }; + + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", who); + return + } + + if is_best { + // update their best block + peer.best_number = number; + peer.best_hash = hash; + } + + // If the announced block is the best they have and is not ahead of us, our common number + // is either one further ahead or it's the one they just announced, if we know about it. + if is_best { + if known && self.best_queued_number >= number { + self.update_peer_common_number(&who, number); + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number + { + self.update_peer_common_number(&who, number.saturating_sub(One::one())); + } + } + self.allowed_requests.add(&who); + + // known block case + if known || self.is_already_downloading(&hash) { + trace!(target: "sync", "Known block announce from {}: {}", who, hash); + if let Some(target) = self.fork_targets.get_mut(&hash) { + target.peers.insert(who); + } + return + } + + if ancient_parent { + trace!( + target: "sync", + "Ignored ancient block announced from {}: {} {:?}", + who, + hash, + announce.header, + ); + return + } + + if self.status().state == SyncState::Idle { + trace!( + target: "sync", + "Added sync target for block announced from {}: {} {:?}", + who, + hash, + announce.summary(), + ); + self.fork_targets + .entry(hash) + .or_insert_with(|| ForkTarget { + number, + parent_hash: Some(*announce.header.parent_hash()), + peers: Default::default(), + }) + .peers + .insert(who); + } + } + + /// Notify that a sync peer has disconnected. + #[must_use] + pub fn peer_disconnected(&mut self, who: &PeerId) -> Option> { + self.blocks.clear_peer_download(who); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(who) + } + self.peers.remove(who); + self.extra_justifications.peer_disconnected(who); + self.allowed_requests.set_all(); + self.fork_targets.retain(|_, target| { + target.peers.remove(who); + !target.peers.is_empty() + }); + + let blocks = self.ready_blocks(); + + (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + } + + /// Get prometheus metrics. + pub fn metrics(&self) -> Metrics { + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + justifications: self.extra_justifications.metrics(), + } + } + + /// Returns the median seen block number. + fn median_seen(&self) -> Option> { + let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); + + if best_seens.is_empty() { + None + } else { + let middle = best_seens.len() / 2; + + // Not the "perfect median" when we have an even number of peers. 
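+ // `select_nth_unstable` reorders in O(n) so that the element at `middle`
+ // is the one that would be there if `best_seens` were fully sorted.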
+ Some(*best_seens.select_nth_unstable(middle).1) + } + } + + fn required_block_attributes(&self) -> BlockAttributes { + match self.mode { + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: true, .. } => + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, + } + } + + fn skip_execution(&self) -> bool { + match self.mode { + SyncMode::Full => false, + SyncMode::LightState { .. } => true, + SyncMode::Warp => true, + } + } + + fn validate_and_queue_blocks( + &mut self, + mut new_blocks: Vec>, + gap: bool, + ) -> ImportBlocksAction { + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!( + target: LOG_TARGET, + "Ignoring {} blocks that are already queued", + orig_len - new_blocks.len(), + ); + } + + let origin = if !gap && !self.status().state.is_major_syncing() { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { + trace!( + target:LOG_TARGET, + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, + ); + self.on_block_queued(h, n) + } + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + + ImportBlocksAction { origin, blocks: new_blocks } + } + + fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.update_common_number(new_common); + } + } + + /// Called when a block has been queued for import. + /// + /// Updates our internal state for best queued block and then goes + /// through all peers to update our view of their state as well. + fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { + if self.fork_targets.remove(hash).is_some() { + trace!(target: LOG_TARGET, "Completed fork sync {hash:?}"); + } + if let Some(gap_sync) = &mut self.gap_sync { + if number > gap_sync.best_queued_number && number <= gap_sync.target { + gap_sync.best_queued_number = number; + } + } + if number > self.best_queued_number { + self.best_queued_number = number; + self.best_queued_hash = *hash; + // Update common blocks + for (n, peer) in self.peers.iter_mut() { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + // Wait for ancestry search to complete first. + continue + } + let new_common_number = + if peer.best_number >= number { number } else { peer.best_number }; + trace!( + target: LOG_TARGET, + "Updating peer {} info, ours={}, common={}->{}, their best={}", + n, + number, + peer.common_number, + new_common_number, + peer.best_number, + ); + peer.common_number = new_common_number; + } + } + self.allowed_requests.set_all(); + } + + /// Restart the sync process. This will reset all pending block requests and return an iterator + /// of new block requests to make to peers. Peers that were downloading finality data (i.e. + /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. 
+ fn restart(&mut self) -> impl Iterator<Item = Result<BlockRequestAction<B>, BadPeer>> + '_ {
+ self.blocks.clear();
+ if let Err(e) = self.reset_sync_start_point() {
+ warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}");
+ }
+ self.allowed_requests.set_all();
+ debug!(
+ target: LOG_TARGET,
+ "Restarted with {} ({})",
+ self.best_queued_number,
+ self.best_queued_hash,
+ );
+ let old_peers = std::mem::take(&mut self.peers);
+
+ old_peers.into_iter().filter_map(move |(peer_id, mut p)| {
+ // Peers that were downloading justifications
+ // should be kept in that state.
+ if let PeerSyncState::DownloadingJustification(_) = p.state {
+ // We make sure our common number is at least something we have.
+ p.common_number = self.best_queued_number;
+ self.peers.insert(peer_id, p);
+ return None
+ }
+
+ // Handle peers that were in other states.
+ match self.new_peer(peer_id, p.best_hash, p.best_number) {
+ // Since the request is not a justification, remove it from pending responses.
+ Ok(None) => Some(Ok(BlockRequestAction::RemoveStale { peer_id })),
+ // Update the request if a new one is available.
+ Ok(Some(request)) => Some(Ok(BlockRequestAction::SendRequest { peer_id, request })),
+ // This implies that we need to drop the pending response from the peer.
+ Err(e) => Some(Err(e)),
+ }
+ })
+ }
+
+ /// Find a block to start sync from. If we sync with state, that's the latest block we have
+ /// state for.
+ fn reset_sync_start_point(&mut self) -> Result<(), ClientError> {
+ let info = self.client.info();
+ if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() {
+ warn!(
+ target: LOG_TARGET,
+ "Can't use fast sync mode with a partially synced database. Reverting to full sync mode."
+ );
+ self.mode = SyncMode::Full;
+ }
+ if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() {
+ warn!(
+ target: LOG_TARGET,
+ "Can't use warp sync mode with a partially synced database. Reverting to full sync mode."
+ );
+ self.mode = SyncMode::Full;
+ }
+ self.import_existing = false;
+ self.best_queued_hash = info.best_hash;
+ self.best_queued_number = info.best_number;
+
+ if self.mode == SyncMode::Full &&
+ self.client.block_status(info.best_hash)? != BlockStatus::InChainWithState
+ {
+ self.import_existing = true;
+ // Latest state is missing, start with the last finalized state or genesis instead.
+ if let Some((hash, number)) = info.finalized_state {
+ debug!(target: LOG_TARGET, "Starting from finalized state #{number}");
+ self.best_queued_hash = hash;
+ self.best_queued_number = number;
+ } else {
+ debug!(target: LOG_TARGET, "Restarting from genesis");
+ self.best_queued_hash = Default::default();
+ self.best_queued_number = Zero::zero();
+ }
+ }
+
+ if let Some((start, end)) = info.block_gap {
+ debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}");
+ self.gap_sync = Some(GapSync {
+ best_queued_number: start - One::one(),
+ target: end,
+ blocks: BlockCollection::new(),
+ });
+ }
+ trace!(
+ target: LOG_TARGET,
+ "Restarted sync at #{} ({:?})",
+ self.best_queued_number,
+ self.best_queued_hash,
+ );
+ Ok(())
+ }
+
+ /// What is the status of the block corresponding to the given hash?
+ fn block_status(&self, hash: &B::Hash) -> Result<BlockStatus, ClientError> {
+ if self.queue_blocks.contains(hash) {
+ return Ok(BlockStatus::Queued)
+ }
+ self.client.block_status(*hash)
+ }
+
+ /// Is the block corresponding to the given hash known?
+ fn is_known(&self, hash: &B::Hash) -> bool { + self.block_status(hash).ok().map_or(false, |s| s != BlockStatus::Unknown) + } + + /// Is any peer downloading the given hash? + fn is_already_downloading(&self, hash: &B::Hash) -> bool { + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + } + + /// Check if the peer is known to the sync state machine. Used for sanity checks. + pub fn is_peer_known(&self, peer_id: &PeerId) -> bool { + self.peers.contains_key(peer_id) + } + + /// Get the set of downloaded blocks that are ready to be queued for import. + fn ready_blocks(&mut self) -> Vec> { + self.blocks + .ready_blocks(self.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = block_data + .block + .justifications + .or_else(|| legacy_justification_mapping(block_data.block.justification)); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + indexed_body: block_data.block.indexed_body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, + } + }) + .collect() + } + + /// Set the warp sync target block externally in case we skip warp proofs downloading. + pub fn set_warp_sync_target_block(&mut self, header: B::Header) { + if let Some(ref mut warp_sync) = self.warp_sync { + warp_sync.set_target_block(header); + } else { + self.warp_sync_target_block_header = Some(header); + } + } + + /// Generate block request for downloading of the target block body during warp sync. + fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { + let sync = &self.warp_sync.as_ref()?; + + if self.allowed_requests.is_empty() || + sync.is_complete() || + self.peers + .iter() + .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock) + { + // Only one pending warp target block request is allowed. + return None + } + + if let Some((target_number, request)) = sync.next_target_block_request() { + // Find a random peer that has a block with the target number. + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= target_number { + trace!(target: LOG_TARGET, "New warp target block request for {id}"); + peer.state = PeerSyncState::DownloadingWarpTargetBlock; + self.allowed_requests.clear(); + return Some((*id, request)) + } + } + } + + None + } + + /// Submit blocks received in a response. 
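+ ///
+ /// Justification-only responses (requests whose fields were
+ /// [`BlockAttributes::JUSTIFICATION`]) are routed to justification handling;
+ /// anything else is processed as block data.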
+ #[must_use] + pub fn on_block_response( + &mut self, + peer_id: PeerId, + request: BlockRequest, + blocks: Vec>, + ) -> OnBlockResponse { + let block_response = BlockResponse:: { id: request.id, blocks }; + + let blocks_range = || match ( + block_response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!( + target: LOG_TARGET, + "BlockResponse {} from {} with {} blocks {}", + block_response.id, + peer_id, + block_response.blocks.len(), + blocks_range(), + ); + + if request.fields == BlockAttributes::JUSTIFICATION { + match self.on_block_justification(peer_id, block_response) { + Ok(OnBlockJustification::Nothing) => OnBlockResponse::Nothing, + Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) => + OnBlockResponse::ImportJustifications(ImportJustificationsAction { + peer_id, + hash, + number, + justifications, + }), + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + OnBlockResponse::Nothing + }, + } + } else { + match self.on_block_data(&peer_id, Some(request), block_response) { + Ok(OnBlockData::Import(action)) => OnBlockResponse::ImportBlocks(action), + Ok(OnBlockData::Request(peer_id, request)) => + OnBlockResponse::SendBlockRequest { peer_id, request }, + Ok(OnBlockData::Continue) => OnBlockResponse::Nothing, + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + OnBlockResponse::Nothing + }, + } + } + } + + /// Submit a state received in a response. + #[must_use] + pub fn on_state_response( + &mut self, + peer_id: PeerId, + response: OpaqueStateResponse, + ) -> Option> { + match self.on_state_data(&peer_id, response) { + Ok(OnStateData::Import(origin, block)) => + Some(ImportBlocksAction { origin, blocks: vec![block] }), + Ok(OnStateData::Continue) => None, + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + None + }, + } + } + + /// Submit a warp proof response received. + pub fn on_warp_sync_response(&mut self, peer_id: PeerId, response: EncodedProof) { + if let Err(BadPeer(id, repu)) = self.on_warp_sync_data(&peer_id, response) { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + } + } + + /// Get justification requests scheduled by sync to be sent out. 
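+ ///
+ /// Along with [`Self::block_requests`], [`Self::state_request`] and
+ /// [`Self::warp_sync_request`], this is meant to be polled periodically by the
+ /// caller, roughly as sketched below (the `send_request` helper is assumed):
+ ///
+ /// ```ignore
+ /// for (peer_id, request) in chain_sync.justification_requests() {
+ ///     send_request(peer_id, request);
+ /// }
+ /// for (peer_id, request) in chain_sync.block_requests() {
+ ///     send_request(peer_id, request);
+ /// }
+ /// ```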
+ pub fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + let peers = &mut self.peers; + let mut matcher = self.extra_justifications.matcher(); + std::iter::from_fn(move || { + if let Some((peer, request)) = matcher.next(peers) { + peers + .get_mut(&peer) + .expect( + "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", + ) + .state = PeerSyncState::DownloadingJustification(request.0); + let req = BlockRequest:: { + id: 0, + fields: BlockAttributes::JUSTIFICATION, + from: FromBlock::Hash(request.0), + direction: Direction::Ascending, + max: Some(1), + }; + Some((peer, req)) + } else { + None + } + }) + .collect() + } + + /// Get block requests scheduled by sync to be sent out. + pub fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + if self.mode == SyncMode::Warp { + return self + .warp_target_block_request() + .map_or_else(|| Vec::new(), |req| Vec::from([req])) + } + + if self.allowed_requests.is_empty() || self.state_sync.is_some() { + return Vec::new() + } + + if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { + trace!(target: LOG_TARGET, "Too many blocks in the queue."); + return Vec::new() + } + let is_major_syncing = self.status().state.is_major_syncing(); + let attrs = self.required_block_attributes(); + let blocks = &mut self.blocks; + let fork_targets = &mut self.fork_targets; + let last_finalized = + std::cmp::min(self.best_queued_number, self.client.info().finalized_number); + let best_queued = self.best_queued_number; + let client = &self.client; + let queue = &self.queue_blocks; + let allowed_requests = self.allowed_requests.take(); + let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; + let max_blocks_per_request = self.max_blocks_per_request; + let gap_sync = &mut self.gap_sync; + self.peers + .iter_mut() + .filter_map(move |(&id, peer)| { + if !peer.state.is_available() || !allowed_requests.contains(&id) { + return None + } + + // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from + // the common number, the peer best number is higher than our best queued and the + // common number is smaller than the last finalized block number, we should do an + // ancestor search to find a better common block. If the queue is full we wait till + // all blocks are imported though. + if best_queued.saturating_sub(peer.common_number) > + MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && + best_queued < peer.best_number && + peer.common_number < last_finalized && + queue.len() <= MAJOR_SYNC_BLOCKS.into() + { + trace!( + target: LOG_TARGET, + "Peer {:?} common block {} too far behind of our best {}. 
Starting ancestry search.", + id, + peer.common_number, + best_queued, + ); + let current = std::cmp::min(peer.best_number, best_queued); + peer.state = PeerSyncState::AncestorSearch { + current, + start: best_queued, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }; + Some((id, ancestry_request::(current))) + } else if let Some((range, req)) = peer_block_request( + &id, + peer, + blocks, + attrs, + max_parallel, + max_blocks_per_request, + last_finalized, + best_queued, + ) { + peer.state = PeerSyncState::DownloadingNew(range.start); + trace!( + target: LOG_TARGET, + "New block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + Some((id, req)) + } else if let Some((hash, req)) = fork_sync_request( + &id, + fork_targets, + best_queued, + last_finalized, + attrs, + |hash| { + if queue.contains(hash) { + BlockStatus::Queued + } else { + client.block_status(*hash).unwrap_or(BlockStatus::Unknown) + } + }, + max_blocks_per_request, + ) { + trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}"); + peer.state = PeerSyncState::DownloadingStale(hash); + Some((id, req)) + } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { + peer_gap_block_request( + &id, + peer, + &mut sync.blocks, + attrs, + sync.target, + sync.best_queued_number, + max_blocks_per_request, + ) + }) { + peer.state = PeerSyncState::DownloadingGap(range.start); + trace!( + target: LOG_TARGET, + "New gap block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + Some((id, req)) + } else { + None + } + }) + .collect() + } + + /// Get a state request scheduled by sync to be sent out (if any). + pub fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + if self.allowed_requests.is_empty() { + return None + } + if (self.state_sync.is_some() || self.warp_sync.is_some()) && + self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) + { + // Only one pending state request is allowed. + return None + } + if let Some(sync) = &self.state_sync { + if sync.is_complete() { + return None + } + + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.common_number >= sync.target_block_num() { + peer.state = PeerSyncState::DownloadingState; + let request = sync.next_request(); + trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); + self.allowed_requests.clear(); + return Some((*id, OpaqueStateRequest(Box::new(request)))) + } + } + } + if let Some(sync) = &self.warp_sync { + if sync.is_complete() { + return None + } + if let (Some(request), Some(target)) = + (sync.next_state_request(), sync.target_block_number()) + { + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= target { + trace!(target: LOG_TARGET, "New StateRequest for {id}: {request:?}"); + peer.state = PeerSyncState::DownloadingState; + self.allowed_requests.clear(); + return Some((*id, OpaqueStateRequest(Box::new(request)))) + } + } + } + } + None + } + + /// Get a warp proof request scheduled by sync to be sent out (if any). + pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + if let Some(sync) = &self.warp_sync { + if self.allowed_requests.is_empty() || + sync.is_complete() || + self.peers + .iter() + .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof) + { + // Only one pending state request is allowed. 
+ return None + } + if let Some(request) = sync.next_warp_proof_request() { + let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); + if !targets.is_empty() { + targets.sort(); + let median = targets[targets.len() / 2]; + // Find a random peer that is synced as much as peer majority. + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= median { + trace!(target: LOG_TARGET, "New WarpProofRequest for {id}"); + peer.state = PeerSyncState::DownloadingWarpProof; + self.allowed_requests.clear(); + return Some((*id, request)) + } + } + } + } + } + None + } + + fn on_state_data( + &mut self, + who: &PeerId, + response: OpaqueStateResponse, + ) -> Result, BadPeer> { + let response: Box = response.0.downcast().map_err(|_error| { + error!( + target: LOG_TARGET, + "Failed to downcast opaque state response, this is an implementation bug." + ); + + BadPeer(*who, rep::BAD_RESPONSE) + })?; + + if let Some(peer) = self.peers.get_mut(who) { + if let PeerSyncState::DownloadingState = peer.state { + peer.state = PeerSyncState::Available; + self.allowed_requests.set_all(); + } + } + let import_result = if let Some(sync) = &mut self.state_sync { + debug!( + target: LOG_TARGET, + "Importing state data from {} with {} keys, {} proof nodes.", + who, + response.entries.len(), + response.proof.len(), + ); + sync.import(*response) + } else if let Some(sync) = &mut self.warp_sync { + debug!( + target: LOG_TARGET, + "Importing state data from {} with {} keys, {} proof nodes.", + who, + response.entries.len(), + response.proof.len(), + ); + sync.import_state(*response) + } else { + debug!(target: LOG_TARGET, "Ignored obsolete state response from {who}"); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + }; + + match import_result { + ImportResult::Import(hash, header, state, body, justifications) => { + let origin = BlockOrigin::NetworkInitialSync; + let block = IncomingBlock { + hash, + header: Some(header), + body, + indexed_body: None, + justifications, + origin: None, + allow_missing_state: true, + import_existing: true, + skip_execution: self.skip_execution(), + state: Some(state), + }; + debug!(target: LOG_TARGET, "State download is complete. Import is queued"); + Ok(OnStateData::Import(origin, block)) + }, + ImportResult::Continue => Ok(OnStateData::Continue), + ImportResult::BadResponse => { + debug!(target: LOG_TARGET, "Bad state data received from {who}"); + Err(BadPeer(*who, rep::BAD_BLOCK)) + }, + } + } + + fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer> { + if let Some(peer) = self.peers.get_mut(who) { + if let PeerSyncState::DownloadingWarpProof = peer.state { + peer.state = PeerSyncState::Available; + self.allowed_requests.set_all(); + } + } + let import_result = if let Some(sync) = &mut self.warp_sync { + debug!( + target: LOG_TARGET, + "Importing warp proof data from {}, {} bytes.", + who, + response.0.len(), + ); + sync.import_warp_proof(response) + } else { + debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {who}"); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + }; + + match import_result { + WarpProofImportResult::Success => Ok(()), + WarpProofImportResult::BadResponse => { + debug!(target: LOG_TARGET, "Bad proof data received from {who}"); + Err(BadPeer(*who, rep::BAD_BLOCK)) + }, + } + } + + /// A batch of blocks have been processed, with or without errors. 
+ /// + /// Call this when a batch of blocks have been processed by the import + /// queue, with or without errors. If an error is returned, the pending response + /// from the peer must be dropped. + #[must_use] + pub fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) -> Box, BadPeer>>> { + trace!(target: LOG_TARGET, "Imported {imported} of {count}"); + + let mut output = Vec::new(); + + let mut has_error = false; + for (_, hash) in &results { + self.queue_blocks.remove(hash); + self.blocks.clear_queued(hash); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_queued(hash); + } + } + for (result, hash) in results { + if has_error { + break + } + + has_error |= result.is_err(); + + match result { + Ok(BlockImportStatus::ImportedKnown(number, who)) => + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); + }, + Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { + if aux.clear_justification_requests { + trace!( + target: LOG_TARGET, + "Block imported clears all pending justification requests {number}: {hash:?}", + ); + self.clear_justification_requests(); + } + + if aux.needs_justification { + trace!( + target: LOG_TARGET, + "Block imported but requires justification {number}: {hash:?}", + ); + self.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(ref peer) = who { + warn!("💔 Sent block with bad justification to import"); + output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); + } + } + + if let Some(peer) = who { + self.update_peer_common_number(&peer, number); + } + let state_sync_complete = + self.state_sync.as_ref().map_or(false, |s| s.target() == hash); + if state_sync_complete { + info!( + target: LOG_TARGET, + "State sync is complete ({} MiB), restarting block sync.", + self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), + ); + self.state_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } + let warp_sync_complete = self + .warp_sync + .as_ref() + .map_or(false, |s| s.target_block_hash() == Some(hash)); + if warp_sync_complete { + info!( + target: LOG_TARGET, + "Warp sync is complete ({} MiB), restarting block sync.", + self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), + ); + self.warp_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } + let gap_sync_complete = + self.gap_sync.as_ref().map_or(false, |s| s.target == number); + if gap_sync_complete { + info!( + target: LOG_TARGET, + "Block history download is complete." 
+ );
+ self.gap_sync = None;
+ },
+ Err(BlockImportError::IncompleteHeader(who)) =>
+ if let Some(peer) = who {
+ warn!(
+ target: LOG_TARGET,
+ "💔 Peer sent block with incomplete header to import",
+ );
+ output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER)));
+ output.extend(self.restart());
+ },
+ Err(BlockImportError::VerificationFailed(who, e)) => {
+ let extra_message =
+ who.map_or_else(|| "".into(), |peer| format!(" received from ({peer})"));
+
+ warn!(
+ target: LOG_TARGET,
+ "💔 Verification failed for block {hash:?}{extra_message}: {e:?}",
+ );
+
+ if let Some(peer) = who {
+ output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL)));
+ }
+
+ output.extend(self.restart());
+ },
+ Err(BlockImportError::BadBlock(who)) =>
+ if let Some(peer) = who {
+ warn!(
+ target: LOG_TARGET,
+ "💔 Block {hash:?} received from peer {peer} has been blacklisted",
+ );
+ output.push(Err(BadPeer(peer, rep::BAD_BLOCK)));
+ },
+ Err(BlockImportError::MissingState) => {
+ // This may happen if the chain we were requesting upon has been discarded
+ // in the meantime because another chain has been finalized.
+ // Don't mark it as bad as it still may be synced if explicitly requested.
+ trace!(target: LOG_TARGET, "Obsolete block {hash:?}");
+ },
+ e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => {
+ warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err());
+ self.state_sync = None;
+ self.warp_sync = None;
+ output.extend(self.restart());
+ },
+ Err(BlockImportError::Cancelled) => {},
+ };
+ }
+
+ self.allowed_requests.set_all();
+ Box::new(output.into_iter())
+ }
+}
+
+// This is purely for a backwards-compatible transitional period and should be removed
+// once we can assume all nodes can send and receive multiple Justifications.
+// The ID tag is hardcoded here to avoid depending on the GRANDPA crate.
+// See: https://github.com/paritytech/substrate/issues/8172
+fn legacy_justification_mapping(
+ justification: Option,
+) -> Option {
+ justification.map(|just| (*b"FRNK", just).into())
+}
+
+/// Request the ancestry for a block. Sends a request for header and justification for the given
+/// block number. Used during ancestry search.
+fn ancestry_request(block: NumberFor) -> BlockRequest {
+ BlockRequest:: {
+ id: 0,
+ fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION,
+ from: FromBlock::Number(block),
+ direction: Direction::Ascending,
+ max: Some(1),
+ }
+}
+
+/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using
+/// to try to find an ancestor block.
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub(crate) enum AncestorSearchState {
+ /// Use exponential backoff to find an ancestor, then switch to binary search.
+ /// We keep track of the exponent.
+ ExponentialBackoff(NumberFor),
+ /// Using binary search to find the best ancestor.
+ /// We keep track of left and right bounds.
+ BinarySearch(NumberFor, NumberFor),
+}
+
+/// This function handles the ancestor search strategy used. The goal is to find a common point
+/// that both our chains agree on that is as close to the tip as possible.
+/// The way this works is that we first have an exponential backoff strategy, where we step
+/// backwards from the tip, doubling the step size on every hash mismatch.
+///
+/// Once we have found a block hash match, we fall back to a binary search between the two
+/// last known points to find the common block closest to the tip.
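+///
+/// As an illustration (hypothetical numbers, not taken from a real sync): starting at block
+/// 1000, the probes visit 1000, 999, 997, 993, 985, and so on, with the distance doubling
+/// after each hash mismatch. As soon as a probe matches, the state switches to
+/// `BinarySearch`, which repeatedly halves the interval between the last matching and
+/// mismatching probes until the closest common block is found.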
+fn handle_ancestor_search_state( + state: &AncestorSearchState, + curr_block_num: NumberFor, + block_hash_match: bool, +) -> Option<(AncestorSearchState, NumberFor)> { + let two = >::one() + >::one(); + match state { + AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { + let next_distance_to_tip = *next_distance_to_tip; + if block_hash_match && next_distance_to_tip == One::one() { + // We found the ancestor in the first step so there is no need to execute binary + // search. + return None + } + if block_hash_match { + let left = curr_block_num; + let right = left + next_distance_to_tip / two; + let middle = left + (right - left) / two; + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } else { + let next_block_num = + curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or_else(Zero::zero); + let next_distance_to_tip = next_distance_to_tip * two; + Some(( + AncestorSearchState::ExponentialBackoff(next_distance_to_tip), + next_block_num, + )) + } + }, + AncestorSearchState::BinarySearch(mut left, mut right) => { + if left >= curr_block_num { + return None + } + if block_hash_match { + left = curr_block_num; + } else { + right = curr_block_num; + } + assert!(right >= left); + let middle = left + (right - left) / two; + if middle == curr_block_num { + None + } else { + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } + }, + } +} + +/// Get a new block request for the peer if any. +fn peer_block_request( + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: BlockAttributes, + max_parallel_downloads: u32, + max_blocks_per_request: u32, + finalized: NumberFor, + best_num: NumberFor, +) -> Option<(Range>, BlockRequest)> { + if best_num >= peer.best_number { + // Will be downloaded as alternative fork instead. + return None + } else if peer.common_number < finalized { + trace!( + target: LOG_TARGET, + "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", + id, peer.common_number, finalized, peer.best_number, best_num, + ); + } + let range = blocks.needed_blocks( + *id, + max_blocks_per_request, + peer.best_number, + peer.common_number, + max_parallel_downloads, + MAX_DOWNLOAD_AHEAD, + )?; + + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); + + let from = if peer.best_number == last { + FromBlock::Hash(peer.best_hash) + } else { + FromBlock::Number(last) + }; + + let request = BlockRequest:: { + id: 0, + fields: attrs, + from, + direction: Direction::Descending, + max: Some((range.end - range.start).saturated_into::()), + }; + + Some((range, request)) +} + +/// Get a new block request for the peer if any. +fn peer_gap_block_request( + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: BlockAttributes, + target: NumberFor, + common_number: NumberFor, + max_blocks_per_request: u32, +) -> Option<(Range>, BlockRequest)> { + let range = blocks.needed_blocks( + *id, + max_blocks_per_request, + std::cmp::min(peer.best_number, target), + common_number, + 1, + MAX_DOWNLOAD_AHEAD, + )?; + + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); + let from = FromBlock::Number(last); + + let request = BlockRequest:: { + id: 0, + fields: attrs, + from, + direction: Direction::Descending, + max: Some((range.end - range.start).saturated_into::()), + }; + Some((range, request)) +} + +/// Get pending fork sync targets for a peer. 
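+///
+/// As a side effect, stale entries are pruned from `targets`: requests at or below the last
+/// finalized number have expired, and requests whose block is already known locally are
+/// obsolete.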
+fn fork_sync_request(
+ id: &PeerId,
+ targets: &mut HashMap>,
+ best_num: NumberFor,
+ finalized: NumberFor,
+ attributes: BlockAttributes,
+ check_block: impl Fn(&B::Hash) -> BlockStatus,
+ max_blocks_per_request: u32,
+) -> Option<(B::Hash, BlockRequest)> {
+ targets.retain(|hash, r| {
+ if r.number <= finalized {
+ trace!(
+ target: LOG_TARGET,
+ "Removed expired fork sync request {:?} (#{})",
+ hash,
+ r.number,
+ );
+ return false
+ }
+ if check_block(hash) != BlockStatus::Unknown {
+ trace!(
+ target: LOG_TARGET,
+ "Removed obsolete fork sync request {:?} (#{})",
+ hash,
+ r.number,
+ );
+ return false
+ }
+ true
+ });
+ for (hash, r) in targets {
+ if !r.peers.contains(&id) {
+ continue
+ }
+ // Download the fork only if it is behind or not too far ahead of our tip of the chain.
+ // Otherwise it should be downloaded in full sync mode.
+ if r.number <= best_num ||
+ (r.number - best_num).saturated_into::() < max_blocks_per_request as u32
+ {
+ let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block);
+ let count = if parent_status == BlockStatus::Unknown {
+ (r.number - finalized).saturated_into::() // up to the last finalized block
+ } else {
+ // request only single block
+ 1
+ };
+ trace!(
+ target: LOG_TARGET,
+ "Downloading requested fork {hash:?} from {id}, {count} blocks",
+ );
+ return Some((
+ *hash,
+ BlockRequest:: {
+ id: 0,
+ fields: attributes,
+ from: FromBlock::Hash(*hash),
+ direction: Direction::Descending,
+ max: Some(count),
+ },
+ ))
+ } else {
+ trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number);
+ }
+ }
+ None
+}
+
+/// Returns `true` if the given `block` is a descendent of `base`.
+fn is_descendent_of(
+ client: &T,
+ base: &Block::Hash,
+ block: &Block::Hash,
+) -> sp_blockchain::Result
+where
+ Block: BlockT,
+ T: HeaderMetadata + ?Sized,
+{
+ if base == block {
+ return Ok(false)
+ }
+
+ let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?;
+
+ Ok(ancestor.hash == *base)
+}
+
+/// Validate that the given `blocks` are correct.
+/// Returns the number of the first block in the sequence.
+///
+/// It is expected that `blocks` are in ascending order.
+fn validate_blocks(
+ blocks: &Vec>,
+ who: &PeerId,
+ request: Option>,
+) -> Result>, BadPeer> {
+ if let Some(request) = request {
+ if Some(blocks.len() as _) > request.max {
+ debug!(
+ target: LOG_TARGET,
+ "Received more blocks than requested from {}. Expected at most {:?}, got {}.",
+ who,
+ request.max,
+ blocks.len(),
+ );
+
+ return Err(BadPeer(*who, rep::NOT_REQUESTED))
+ }
+
+ let block_header =
+ if request.direction == Direction::Descending { blocks.last() } else { blocks.first() }
+ .and_then(|b| b.header.as_ref());
+
+ let expected_block = block_header.as_ref().map_or(false, |h| match request.from {
+ FromBlock::Hash(hash) => h.hash() == hash,
+ FromBlock::Number(n) => h.number() == &n,
+ });
+
+ if !expected_block {
+ debug!(
+ target: LOG_TARGET,
+ "Received block that was not requested. 
Requested {:?}, got {:?}.", + request.from, + block_header, + ); + + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + } + + if request.fields.contains(BlockAttributes::HEADER) && + blocks.iter().any(|b| b.header.is_none()) + { + trace!( + target: LOG_TARGET, + "Missing requested header for a block in response from {who}.", + ); + + return Err(BadPeer(*who, rep::BAD_RESPONSE)) + } + + if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) + { + trace!( + target: LOG_TARGET, + "Missing requested body for a block in response from {who}.", + ); + + return Err(BadPeer(*who, rep::BAD_RESPONSE)) + } + } + + for b in blocks { + if let Some(header) = &b.header { + let hash = header.hash(); + if hash != b.hash { + debug!( + target:LOG_TARGET, + "Bad header received from {}. Expected hash {:?}, got {:?}", + who, + b.hash, + hash, + ); + return Err(BadPeer(*who, rep::BAD_BLOCK)) + } + } + if let (Some(header), Some(body)) = (&b.header, &b.body) { + let expected = *header.extrinsics_root(); + let got = HashingFor::::ordered_trie_root( + body.iter().map(Encode::encode).collect(), + sp_runtime::StateVersion::V0, + ); + if expected != got { + debug!( + target:LOG_TARGET, + "Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}", + b.hash, + who, + expected, + got, + ); + return Err(BadPeer(*who, rep::BAD_BLOCK)) + } + } + } + + Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) +} diff --git a/substrate/client/network/sync/src/chain_sync/test.rs b/substrate/client/network/sync/src/chain_sync/test.rs new file mode 100644 index 000000000000..6f9fea1b161b --- /dev/null +++ b/substrate/client/network/sync/src/chain_sync/test.rs @@ -0,0 +1,1085 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of [`ChainSync`]. + +use super::*; +use crate::service::network::NetworkServiceProvider; +use futures::executor::block_on; +use sc_block_builder::BlockBuilderProvider; +use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; +use sp_blockchain::HeaderBackend; +use substrate_test_runtime_client::{ + runtime::{Block, Hash, Header}, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, + TestClientBuilder, TestClientBuilderExt, +}; + +#[test] +fn processes_empty_response_on_justification_request_for_unknown_block() { + // if we ask for a justification for a given block to a peer that doesn't know that block + // (different from not having a justification), the peer will reply with an empty response. + // internally we should process the response as the justification not being available. 
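+ // note that such an empty response must not be penalized: the request is simply moved
+ // back to the pending set so that it can be rescheduled, as asserted at the end of this
+ // test.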
+
+ let client = Arc::new(TestClientBuilder::new().build());
+ let peer_id = PeerId::random();
+
+ let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
+ let mut sync = ChainSync::new(
+ SyncMode::Full,
+ client.clone(),
+ ProtocolName::from("test-block-announce-protocol"),
+ 1,
+ 64,
+ None,
+ chain_sync_network_handle,
+ )
+ .unwrap();
+
+ let (a1_hash, a1_number) = {
+ let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block;
+ (a1.hash(), *a1.header.number())
+ };
+
+ // add a new peer with the same best block
+ sync.new_peer(peer_id, a1_hash, a1_number).unwrap();
+
+ // and request a justification for the block
+ sync.request_justification(&a1_hash, a1_number);
+
+ // the justification request should be scheduled to that peer
+ assert!(sync
+ .justification_requests()
+ .iter()
+ .any(|(who, request)| { *who == peer_id && request.from == FromBlock::Hash(a1_hash) }));
+
+ // there are no extra pending requests
+ assert_eq!(sync.extra_justifications.pending_requests().count(), 0);
+
+ // there's one in-flight extra request to the expected peer
+ assert!(sync.extra_justifications.active_requests().any(|(who, (hash, number))| {
+ *who == peer_id && *hash == a1_hash && *number == a1_number
+ }));
+
+ // if the peer replies with an empty response (i.e. it doesn't know the block),
+ // the active request should be cleared.
+ assert_eq!(
+ sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }),
+ Ok(OnBlockJustification::Nothing),
+ );
+
+ // there should be no in-flight requests
+ assert_eq!(sync.extra_justifications.active_requests().count(), 0);
+
+ // and the request should now be pending again, waiting for reschedule
+ assert!(sync
+ .extra_justifications
+ .pending_requests()
+ .any(|(hash, number)| { *hash == a1_hash && *number == a1_number }));
+}
+
+#[test]
+fn restart_doesnt_affect_peers_downloading_finality_data() {
+ let mut client = Arc::new(TestClientBuilder::new().build());
+ let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
+
+ let mut sync = ChainSync::new(
+ SyncMode::Full,
+ client.clone(),
+ ProtocolName::from("test-block-announce-protocol"),
+ 1,
+ 64,
+ None,
+ chain_sync_network_handle,
+ )
+ .unwrap();
+
+ let peer_id1 = PeerId::random();
+ let peer_id2 = PeerId::random();
+ let peer_id3 = PeerId::random();
+
+ let mut new_blocks = |n| {
+ for _ in 0..n {
+ let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
+ block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
+ }
+
+ let info = client.info();
+ (info.best_hash, info.best_number)
+ };
+
+ let (b1_hash, b1_number) = new_blocks(50);
+
+ // add 2 peers at blocks that we don't have locally
+ sync.new_peer(peer_id1, Hash::random(), 42).unwrap();
+ sync.new_peer(peer_id2, Hash::random(), 10).unwrap();
+
+ // we will send block requests to these peers
+ // for these blocks we don't know about
+ assert!(sync
+ .block_requests()
+ .into_iter()
+ .all(|(p, _)| { p == peer_id1 || p == peer_id2 }));
+
+ // add a new peer at a known block
+ sync.new_peer(peer_id3, b1_hash, b1_number).unwrap();
+
+ // we request a justification for a block we have locally
+ sync.request_justification(&b1_hash, b1_number);
+
+ // the justification request should be scheduled to the
+ // new peer which is at the given block
+ assert!(sync.justification_requests().iter().any(|(p, r)| {
+ *p == peer_id3 &&
+ r.fields == BlockAttributes::JUSTIFICATION &&
+ r.from == FromBlock::Hash(b1_hash)
+ }));
+
+ assert_eq!(
+ sync.peers.get(&peer_id3).unwrap().state,
+ PeerSyncState::DownloadingJustification(b1_hash),
+ );
+
+ // we restart the sync state
+ let block_requests = sync.restart();
+
+ // which should make us send out block requests to the first two peers
+ assert!(block_requests.map(|r| r.unwrap()).all(|event| match event {
+ BlockRequestAction::SendRequest { peer_id, .. } =>
+ peer_id == peer_id1 || peer_id == peer_id2,
+ BlockRequestAction::RemoveStale { .. } => false,
+ }));
+
+ // peer 3 should be unaffected as it was downloading finality data
+ assert_eq!(
+ sync.peers.get(&peer_id3).unwrap().state,
+ PeerSyncState::DownloadingJustification(b1_hash),
+ );
+
+ // Set common block to something that we don't have (e.g. failed import)
+ sync.peers.get_mut(&peer_id3).unwrap().common_number = 100;
+ let _ = sync.restart().count();
+ assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50);
+}
+
+/// Send a block announcement for the given `header`.
+fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync) {
+ let announce = BlockAnnounce {
+ header: header.clone(),
+ state: Some(BlockState::Best),
+ data: Some(Vec::new()),
+ };
+
+ sync.on_validated_block_announce(true, peer_id, &announce);
+}
+
+/// Create a block response from the given `blocks`.
+fn create_block_response(blocks: Vec) -> BlockResponse {
+ BlockResponse:: {
+ id: 0,
+ blocks: blocks
+ .into_iter()
+ .map(|b| BlockData:: {
+ hash: b.hash(),
+ header: Some(b.header().clone()),
+ body: Some(b.deconstruct().1),
+ indexed_body: None,
+ receipt: None,
+ message_queue: None,
+ justification: None,
+ justifications: None,
+ })
+ .collect(),
+ }
+}
+
+/// Get a block request from `sync` and check that it matches the expected request.
+fn get_block_request(
+ sync: &mut ChainSync,
+ from: FromBlock,
+ max: u32,
+ peer: &PeerId,
+) -> BlockRequest {
+ let requests = sync.block_requests();
+
+ log::trace!(target: LOG_TARGET, "Requests: {requests:?}");
+
+ assert_eq!(1, requests.len());
+ assert_eq!(*peer, requests[0].0);
+
+ let request = requests[0].1.clone();
+
+ assert_eq!(from, request.from);
+ assert_eq!(Some(max), request.max);
+ request
+}
+
+/// Build and import a new best block.
+fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block {
+ let at = at.unwrap_or_else(|| client.info().best_hash);
+
+ let mut block_builder = client.new_block_at(at, Default::default(), false).unwrap();
+
+ if fork {
+ block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap();
+ }
+
+ let block = block_builder.build().unwrap().block;
+
+ block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
+ block
+}
+
+/// This test is a regression test for a scenario observed on a real network.
+///
+/// The node is connected to two peers, both of which have a best block (1) that is below our
+/// best block (3). Peer 2 then announces a fork of block 3 that we will request from it.
+/// After importing the fork, peer 2 and then peer 1 will announce block 4. But as peer 1, in
+/// our view, is still at block 1, we will request block 2 (which we already have) from it.
+/// Meanwhile peer 2 sends us blocks 4 and 3, and we send another request for block 2 to
+/// peer 2. Peer 1 answers with block 2, and then peer 2 does as well. Both responses need to
+/// succeed, as we have requested block 2 from both peers.
+#[test] +fn do_not_report_peer_on_block_response_for_block_request() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + ProtocolName::from("test-block-announce-protocol"), + 5, + 64, + None, + chain_sync_network_handle, + ) + .unwrap(); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let mut client2 = client.clone(); + let mut build_block_at = |at, import| { + let mut block_builder = client2.new_block_at(at, Default::default(), false).unwrap(); + // Make sure we generate a different block as fork + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + + let block = block_builder.build().unwrap().block; + + if import { + block_on(client2.import(BlockOrigin::Own, block.clone())).unwrap(); + } + + block + }; + + let block1 = build_block(&mut client, None, false); + let block2 = build_block(&mut client, None, false); + let block3 = build_block(&mut client, None, false); + let block3_fork = build_block_at(block2.hash(), false); + + // Add two peers which are on block 1. + sync.new_peer(peer_id1, block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2, block1.hash(), 1).unwrap(); + + // Tell sync that our best block is 3. + sync.update_chain_info(&block3.hash(), 3); + + // There should be no requests. + assert!(sync.block_requests().is_empty()); + + // Let peer2 announce a fork of block 3 + send_block_announce(block3_fork.header().clone(), peer_id2, &mut sync); + + // Import and tell sync that we now have the fork. + block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); + sync.update_chain_info(&block3_fork.hash(), 3); + + let block4 = build_block_at(block3_fork.hash(), false); + + // Let peer2 announce block 4 and check that sync wants to get the block. + send_block_announce(block4.header().clone(), peer_id2, &mut sync); + + let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); + + // Peer1 announces the same block, but as the common block is still `1`, sync will request + // block 2 again. + send_block_announce(block4.header().clone(), peer_id1, &mut sync); + + let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); + + let response = create_block_response(vec![block4.clone(), block3_fork.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request), response).unwrap(); + + // We should not yet import the blocks, because there is still an open request for fetching + // block `2` which blocks the import. 
+ assert!(
+ matches!(res, OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty())
+ );
+
+ let request3 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id2);
+
+ let response = create_block_response(vec![block2.clone()]);
+ let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap();
+ assert!(matches!(
+ res,
+ OnBlockData::Import(ImportBlocksAction{ origin: _, blocks })
+ if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number()))
+ ));
+
+ let response = create_block_response(vec![block2.clone()]);
+ let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap();
+ // Nothing to import
+ assert!(
+ matches!(res, OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty())
+ );
+}
+
+fn unwrap_from_block_number(from: FromBlock) -> u64 {
+ if let FromBlock::Number(from) = from {
+ from
+ } else {
+ panic!("Expected a number!");
+ }
+}
+
+/// A regression test for a behavior we have seen on a live network.
+///
+/// The scenario is that the node is doing a full resync and is connected to some node that is
+/// doing a major sync as well. This other node will finish its sync before our node and send
+/// a block announcement message, but we haven't seen any block announcement from this node
+/// during its sync process, meaning our common number didn't change. It is now expected that
+/// we start an ancestor search to find the common number.
+#[test]
+fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
+ sp_tracing::try_init_simple();
+
+ let blocks = {
+ let mut client = Arc::new(TestClientBuilder::new().build());
+ (0..MAX_DOWNLOAD_AHEAD * 2)
+ .map(|_| build_block(&mut client, None, false))
+ .collect::>()
+ };
+
+ let mut client = Arc::new(TestClientBuilder::new().build());
+ let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
+ let info = client.info();
+
+ let mut sync = ChainSync::new(
+ SyncMode::Full,
+ client.clone(),
+ ProtocolName::from("test-block-announce-protocol"),
+ 5,
+ 64,
+ None,
+ chain_sync_network_handle,
+ )
+ .unwrap();
+
+ let peer_id1 = PeerId::random();
+ let peer_id2 = PeerId::random();
+
+ let best_block = blocks.last().unwrap().clone();
+ let max_blocks_to_request = sync.max_blocks_per_request;
+ // Connect the node we will sync from
+ sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number())
+ .unwrap();
+ sync.new_peer(peer_id2, info.best_hash, 0).unwrap();
+
+ let mut best_block_num = 0;
+ while best_block_num < MAX_DOWNLOAD_AHEAD {
+ let request = get_block_request(
+ &mut sync,
+ FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64),
+ max_blocks_to_request as u32,
+ &peer_id1,
+ );
+
+ let from = unwrap_from_block_number(request.from.clone());
+
+ let mut resp_blocks = blocks[best_block_num as usize..from as usize].to_vec();
+ resp_blocks.reverse();
+
+ let response = create_block_response(resp_blocks.clone());
+
+ let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap();
+ assert!(matches!(
+ res,
+ OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize
+ ),);
+
+ best_block_num += max_blocks_to_request as u32;
+
+ let _ = sync.on_blocks_processed(
+ max_blocks_to_request as usize,
+ max_blocks_to_request as usize,
+ resp_blocks
+ .iter()
+ .rev()
+ .map(|b| {
+ (
+ Ok(BlockImportStatus::ImportedUnknown(
+ *b.header().number(),
+ Default::default(),
+ Some(peer_id1),
+ )),
+ b.hash(),
+ )
+ })
+ .collect(),
+ );
+
+ resp_blocks
+ .into_iter()
+ .rev()
+ .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap());
+ }
+
+ // "Wait" for the queue to clear
+ sync.queue_blocks.clear();
+
+ // Let peer2 announce that it finished syncing
+ send_block_announce(best_block.header().clone(), peer_id2, &mut sync);
+
+ let (peer1_req, peer2_req) =
+ sync.block_requests().into_iter().fold((None, None), |res, req| {
+ if req.0 == peer_id1 {
+ (Some(req.1), res.1)
+ } else if req.0 == peer_id2 {
+ (res.0, Some(req.1))
+ } else {
+ panic!("Unexpected req: {:?}", req)
+ }
+ });
+
+ // We should now do an ancestor search to find the correct common block.
+ let peer2_req = peer2_req.unwrap();
+ assert_eq!(Some(1), peer2_req.max);
+ assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from);
+
+ let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]);
+ let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap();
+ assert!(matches!(
+ res,
+ OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty()
+ ),);
+
+ let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from);
+
+ // As we are on the same chain, we should directly continue with requesting blocks from
+ // peer 2 as well.
+ get_block_request(
+ &mut sync,
+ FromBlock::Number(peer1_from + max_blocks_to_request as u64),
+ max_blocks_to_request as u32,
+ &peer_id2,
+ );
+}
+
+/// A test that ensures that we can sync a huge fork.
+///
+/// The following scenario:
+/// A peer connects to us and we both have the common block 512. The last finalized is 2048.
+/// Our best block is 4096. The peer sends us a block announcement with 4097 from a fork.
+///
+/// We will first do an ancestor search to find the common block. After that we start to sync
+/// the fork and finish it ;)
+#[test]
+fn can_sync_huge_fork() {
+ sp_tracing::try_init_simple();
+
+ let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
+ let mut client = Arc::new(TestClientBuilder::new().build());
+ let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4)
+ .map(|_| build_block(&mut client, None, false))
+ .collect::>();
+
+ let fork_blocks = {
+ let mut client = Arc::new(TestClientBuilder::new().build());
+ let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2]
+ .into_iter()
+ .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap())
+ .cloned()
+ .collect::>();
+
+ fork_blocks
+ .into_iter()
+ .chain(
+ (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1)
+ .map(|_| build_block(&mut client, None, true)),
+ )
+ .collect::>()
+ };
+
+ let info = client.info();
+
+ let mut sync = ChainSync::new(
+ SyncMode::Full,
+ client.clone(),
+ ProtocolName::from("test-block-announce-protocol"),
+ 5,
+ 64,
+ None,
+ chain_sync_network_handle,
+ )
+ .unwrap();
+
+ let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone();
+ let just = (*b"TEST", Vec::new());
+ client.finalize_block(finalized_block.hash(), Some(just)).unwrap();
+ sync.update_chain_info(&info.best_hash, info.best_number);
+
+ let peer_id1 = PeerId::random();
+
+ let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
+ // Connect the node we will sync from
+ sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number())
+ .unwrap();
+
+ send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync);
+
+ let mut request =
+ get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1);
+
+ // Do the ancestor search
+ loop {
+ let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1];
+ let response = create_block_response(vec![block.clone()]);
+
+ let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap();
+ request = if let OnBlockData::Request(_peer, request) = on_block_data {
+ request
+ } else {
+ // We found the ancestor
+ break
+ };
+
+ log::trace!(target: LOG_TARGET, "Request: {request:?}");
+ }
+
+ // Now request and import the fork.
+ let mut best_block_num = *finalized_block.header().number() as u32; + let max_blocks_to_request = sync.max_blocks_per_request; + while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { + let request = get_block_request( + &mut sync, + FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), + max_blocks_to_request as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == sync.max_blocks_per_request as usize + ),); + + best_block_num += sync.max_blocks_per_request as u32; + + let _ = sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + *b.header().number(), + Default::default(), + Some(peer_id1), + )), + b.hash(), + ) + }) + .collect(), + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request(&mut sync, FromBlock::Hash(fork_blocks.last().unwrap().hash()), 1, &peer_id1); +} + +#[test] +fn syncs_fork_without_duplicate_requests() { + sp_tracing::try_init_simple(); + + let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) + .cloned() + .collect::>(); + + fork_blocks + .into_iter() + .chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + ProtocolName::from("test-block-announce-protocol"), + 5, + 64, + None, + chain_sync_network_handle, + ) + .unwrap(); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + .unwrap(); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); + + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + request = if let OnBlockData::Request(_peer, request) = 
on_block_data {
+ request
+ } else {
+ // We found the ancestor
+ break
+ };
+
+ log::trace!(target: LOG_TARGET, "Request: {request:?}");
+ }
+
+ // Now request and import the fork.
+ let mut best_block_num = *finalized_block.header().number() as u32;
+ let max_blocks_to_request = sync.max_blocks_per_request;
+
+ let mut request = get_block_request(
+ &mut sync,
+ FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64),
+ max_blocks_to_request as u32,
+ &peer_id1,
+ );
+ let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1;
+ while best_block_num < last_block_num {
+ let from = unwrap_from_block_number(request.from.clone());
+
+ let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec();
+ resp_blocks.reverse();
+
+ let response = create_block_response(resp_blocks.clone());
+
+ let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap();
+ assert!(matches!(
+ res,
+ OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize
+ ),);
+
+ best_block_num += max_blocks_to_request as u32;
+
+ if best_block_num < last_block_num {
+ // make sure we're not getting a duplicate request in the time before the blocks are
+ // processed
+ request = get_block_request(
+ &mut sync,
+ FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64),
+ max_blocks_to_request as u32,
+ &peer_id1,
+ );
+ }
+
+ let mut notify_imported: Vec<_> = resp_blocks
+ .iter()
+ .rev()
+ .map(|b| {
+ (
+ Ok(BlockImportStatus::ImportedUnknown(
+ *b.header().number(),
+ Default::default(),
+ Some(peer_id1),
+ )),
+ b.hash(),
+ )
+ })
+ .collect();
+
+ // The import queue may send notifications in batches of varying size. So we simulate
+ // this here by splitting the batch into 2 notifications.
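+ // Splitting at the halfway point is an arbitrary choice for this simulation; any other
+ // partition of the batch should exercise the same code path.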
+ let max_blocks_to_request = sync.max_blocks_per_request; + let second_batch = notify_imported.split_off(notify_imported.len() / 2); + let _ = sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + notify_imported, + ); + + let _ = sync.on_blocks_processed( + max_blocks_to_request as usize, + max_blocks_to_request as usize, + second_batch, + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request(&mut sync, FromBlock::Hash(fork_blocks.last().unwrap().hash()), 1, &peer_id1); +} + +#[test] +fn removes_target_fork_on_disconnect() { + sp_tracing::try_init_simple(); + let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + ProtocolName::from("test-block-announce-protocol"), + 1, + 64, + None, + chain_sync_network_handle, + ) + .unwrap(); + + let peer_id1 = PeerId::random(); + let common_block = blocks[1].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + .unwrap(); + + // Create a "new" header and announce it + let mut header = blocks[0].header().clone(); + header.number = 4; + send_block_announce(header, peer_id1, &mut sync); + assert!(sync.fork_targets.len() == 1); + + let _ = sync.peer_disconnected(&peer_id1); + assert!(sync.fork_targets.len() == 0); +} + +#[test] +fn can_import_response_with_missing_blocks() { + sp_tracing::try_init_simple(); + let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); + let mut client2 = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); + + let empty_client = Arc::new(TestClientBuilder::new().build()); + + let mut sync = ChainSync::new( + SyncMode::Full, + empty_client.clone(), + ProtocolName::from("test-block-announce-protocol"), + 1, + 64, + None, + chain_sync_network_handle, + ) + .unwrap(); + + let peer_id1 = PeerId::random(); + let best_block = blocks[3].clone(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) + .unwrap(); + + sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; + sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; + + // Request all missing blocks and respond only with some. + let request = get_block_request(&mut sync, FromBlock::Hash(best_block.hash()), 4, &peer_id1); + let response = + create_block_response(vec![blocks[3].clone(), blocks[2].clone(), blocks[1].clone()]); + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + assert_eq!(sync.best_queued_number, 0); + + // Request should only contain the missing block. 
+ let request = get_block_request(&mut sync, FromBlock::Number(1), 1, &peer_id1);
+ let response = create_block_response(vec![blocks[0].clone()]);
+ sync.on_block_data(&peer_id1, Some(request), response).unwrap();
+ assert_eq!(sync.best_queued_number, 4);
+}
+
+#[test]
+fn ancestor_search_repeat() {
+ let state = AncestorSearchState::::BinarySearch(1, 3);
+ assert!(handle_ancestor_search_state(&state, 2, true).is_none());
+}
+
+#[test]
+fn sync_restart_removes_block_but_not_justification_requests() {
+ let mut client = Arc::new(TestClientBuilder::new().build());
+ let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
+ let mut sync = ChainSync::new(
+ SyncMode::Full,
+ client.clone(),
+ ProtocolName::from("test-block-announce-protocol"),
+ 1,
+ 64,
+ None,
+ chain_sync_network_handle,
+ )
+ .unwrap();
+
+ let peers = vec![PeerId::random(), PeerId::random()];
+
+ let mut new_blocks = |n| {
+ for _ in 0..n {
+ let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
+ block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
+ }
+
+ let info = client.info();
+ (info.best_hash, info.best_number)
+ };
+
+ let (b1_hash, b1_number) = new_blocks(50);
+
+ // add a new peer and request blocks from it
+ sync.new_peer(peers[0], Hash::random(), 42).unwrap();
+
+ // we don't actually perform any requests, just keep track of peers waiting for a response
+ let mut pending_responses = HashSet::new();
+
+ // we will send block requests to these peers
+ // for these blocks we don't know about
+ for (peer, _request) in sync.block_requests() {
+ // "send" request
+ pending_responses.insert(peer);
+ }
+
+ // add a new peer at a known block
+ sync.new_peer(peers[1], b1_hash, b1_number).unwrap();
+
+ // we request a justification for a block we have locally
+ sync.request_justification(&b1_hash, b1_number);
+
+ // the justification request should be scheduled to the
+ // new peer which is at the given block
+ let mut requests = sync.justification_requests();
+ assert_eq!(requests.len(), 1);
+ let (peer, _request) = requests.remove(0);
+ // "send" request
+ assert!(pending_responses.insert(peer));
+
+ assert!(!std::matches!(
+ sync.peers.get(&peers[0]).unwrap().state,
+ PeerSyncState::DownloadingJustification(_),
+ ));
+ assert_eq!(
+ sync.peers.get(&peers[1]).unwrap().state,
+ PeerSyncState::DownloadingJustification(b1_hash),
+ );
+ assert_eq!(pending_responses.len(), 2);
+
+ // restart sync
+ let request_events = sync.restart().collect::>();
+ for event in request_events.iter() {
+ match event.as_ref().unwrap() {
+ BlockRequestAction::RemoveStale { peer_id } => {
+ pending_responses.remove(&peer_id);
+ },
+ BlockRequestAction::SendRequest { peer_id, .. } => {
+ // we drop the obsolete response, but don't register a new request; that's
+ // checked in the `assert!` below
+ pending_responses.remove(&peer_id);
+ },
+ }
+ }
+ assert!(request_events.iter().any(|event| {
+ match event.as_ref().unwrap() {
+ BlockRequestAction::RemoveStale { .. } => false,
+ BlockRequestAction::SendRequest { peer_id, .. 
} => peer_id == &peers[0], + } + })); + + assert_eq!(pending_responses.len(), 1); + assert!(pending_responses.contains(&peers[1])); + assert_eq!( + sync.peers.get(&peers[1]).unwrap().state, + PeerSyncState::DownloadingJustification(b1_hash), + ); + let _ = sync.peer_disconnected(&peers[1]); + pending_responses.remove(&peers[1]); + assert_eq!(pending_responses.len(), 0); +} + +/// The test demonstrates https://github.com/paritytech/polkadot-sdk/issues/2094. +/// TODO: convert it into desired behavior test once the issue is fixed (see inline comments). +/// The issue: we currently rely on block numbers instead of block hash +/// to download blocks from peers. As a result, we can end up with blocks +/// from different forks as shown by the test. +#[test] +#[should_panic] +fn request_across_forks() { + sp_tracing::try_init_simple(); + + let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..100).map(|_| build_block(&mut client, None, false)).collect::>(); + + let fork_a_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut fork_blocks = blocks[..] + .into_iter() + .inspect(|b| { + assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); + block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() + }) + .cloned() + .collect::>(); + for _ in 0..10 { + fork_blocks.push(build_block(&mut client, None, false)); + } + fork_blocks + }; + + let fork_b_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut fork_blocks = blocks[..] + .into_iter() + .inspect(|b| { + assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); + block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() + }) + .cloned() + .collect::>(); + for _ in 0..10 { + fork_blocks.push(build_block(&mut client, None, true)); + } + fork_blocks + }; + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + ProtocolName::from("test-block-announce-protocol"), + 5, + 64, + None, + chain_sync_network_handle, + ) + .unwrap(); + + // Add the peers, all at the common ancestor 100. + let common_block = blocks.last().unwrap(); + let peer_id1 = PeerId::random(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) + .unwrap(); + let peer_id2 = PeerId::random(); + sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()) + .unwrap(); + + // Peer 1 announces 107 from fork 1, 100-107 get downloaded. + { + let block = (&fork_a_blocks[106]).clone(); + let peer = peer_id1; + log::trace!(target: LOG_TARGET, "<1> {peer} announces from fork 1"); + send_block_announce(block.header().clone(), peer, &mut sync); + let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 7, &peer); + let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); + resp_blocks.reverse(); + let response = create_block_response(resp_blocks.clone()); + let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 7_usize + ),); + assert_eq!(sync.best_queued_number, 107); + assert_eq!(sync.best_queued_hash, block.hash()); + assert!(sync.is_known(&block.header.parent_hash())); + } + + // Peer 2 also announces 107 from fork 1. 
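+ // Since these blocks are already queued locally, none of the announcements should
+ // trigger a new block request, and the best queued block should stay unchanged, as
+ // asserted below.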
+ { + let prev_best_number = sync.best_queued_number; + let prev_best_hash = sync.best_queued_hash; + let peer = peer_id2; + log::trace!(target: LOG_TARGET, "<2> {peer} announces from fork 1"); + for i in 100..107 { + let block = (&fork_a_blocks[i]).clone(); + send_block_announce(block.header().clone(), peer, &mut sync); + assert!(sync.block_requests().is_empty()); + } + assert_eq!(sync.best_queued_number, prev_best_number); + assert_eq!(sync.best_queued_hash, prev_best_hash); + } + + // Peer 2 undergoes reorg, announces 108 from fork 2, gets downloaded even though we + // don't have the parent from fork 2. + { + let block = (&fork_b_blocks[107]).clone(); + let peer = peer_id2; + log::trace!(target: LOG_TARGET, "<3> {peer} announces from fork 2"); + send_block_announce(block.header().clone(), peer, &mut sync); + // TODO: when the issue is fixed, this test can be changed to test the + // expected behavior instead. The needed changes would be: + // 1. Remove the `#[should_panic]` directive + // 2. These should be changed to check that sync.block_requests().is_empty(), after the + // block is announced. + let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); + let response = create_block_response(vec![block.clone()]); + let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 1_usize + ),); + assert!(sync.is_known(&block.header.parent_hash())); + } +} diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 1a383cdde479..0f689742bc58 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -24,12 +24,21 @@ use crate::{ BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, }, block_relay_protocol::{BlockDownloader, BlockResponseError}, + block_request_handler::MAX_BLOCKS_IN_RESPONSE, + chain_sync::{ + BlockRequestAction, ChainSync, ImportBlocksAction, ImportJustificationsAction, + OnBlockResponse, + }, pending_responses::{PendingResponses, ResponseEvent}, schema::v1::{StateRequest, StateResponse}, - service::{self, chain_sync::ToServiceCommand}, - warp::WarpSyncParams, - BlockRequestAction, ChainSync, ClientError, ImportBlocksAction, ImportJustificationsAction, - OnBlockResponse, SyncingService, + service::{ + self, + chain_sync::{SyncingService, ToServiceCommand}, + }, + types::{ + BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, + }, + warp::{EncodedProof, WarpProofRequest, WarpSyncParams}, }; use codec::{Decode, Encode}; @@ -61,15 +70,10 @@ use sc_network::{ }; use sc_network_common::{ role::Roles, - sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, - warp::{EncodedProof, WarpProofRequest}, - BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, OpaqueStateRequest, - OpaqueStateResponse, PeerRequest, SyncEvent, - }, + sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::HeaderMetadata; +use sp_blockchain::{Error as ClientError, HeaderMetadata}; use sp_consensus::block_validation::BlockAnnounceValidator; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; @@ -363,18 +367,17 @@ where ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { let mode = 
net_config.network_config.sync_mode; let max_parallel_downloads = net_config.network_config.max_parallel_downloads; - let max_blocks_per_request = if net_config.network_config.max_blocks_per_request > - crate::MAX_BLOCKS_IN_RESPONSE as u32 - { - log::info!( - target: LOG_TARGET, - "clamping maximum blocks per request to {}", - crate::MAX_BLOCKS_IN_RESPONSE, - ); - crate::MAX_BLOCKS_IN_RESPONSE as u32 - } else { - net_config.network_config.max_blocks_per_request - }; + let max_blocks_per_request = + if net_config.network_config.max_blocks_per_request > MAX_BLOCKS_IN_RESPONSE as u32 { + log::info!( + target: LOG_TARGET, + "clamping maximum blocks per request to {}", + MAX_BLOCKS_IN_RESPONSE, + ); + MAX_BLOCKS_IN_RESPONSE as u32 + } else { + net_config.network_config.max_blocks_per_request + }; let cache_capacity = (net_config.network_config.default_peers_set.in_peers + net_config.network_config.default_peers_set.out_peers) .max(1); diff --git a/substrate/client/network/sync/src/extra_requests.rs b/substrate/client/network/sync/src/extra_requests.rs index 09e6bdb57399..8edd1a772e26 100644 --- a/substrate/client/network/sync/src/extra_requests.rs +++ b/substrate/client/network/sync/src/extra_requests.rs @@ -16,11 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{PeerSync, PeerSyncState}; +use crate::{ + chain_sync::{PeerSync, PeerSyncState}, + request_metrics::Metrics, +}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; -use sc_network_common::sync::metrics::Metrics; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ @@ -343,7 +345,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { #[cfg(test)] mod tests { use super::*; - use crate::PeerSync; + use crate::chain_sync::PeerSync; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sp_blockchain::Error as ClientError; use sp_test_primitives::{Block, BlockNumber, Hash}; diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 0c8e8a104e2a..c42b0601e659 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -16,67 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Contains the state of the chain synchronization process -//! -//! At any given point in time, a running node tries as much as possible to be at the head of the -//! chain. This module handles the logic of which blocks to request from remotes, and processing -//! responses. It yields blocks to check and potentially move to the database. -//! -//! # Usage -//! -//! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on -//! the network, or whenever a block has been successfully verified, call the appropriate method in -//! order to update it. 
- -use crate::{ - blocks::BlockCollection, - schema::v1::StateResponse, - state::StateSync, - warp::{WarpProofImportResult, WarpSync, WarpSyncConfig}, -}; - -use codec::Encode; -use extra_requests::ExtraRequests; -use libp2p::PeerId; -use log::{debug, error, info, trace, warn}; - -use sc_client_api::{BlockBackend, ProofProvider}; -use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network::types::ProtocolName; -use sc_network_common::sync::{ - message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, - FromBlock, - }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, - BadPeer, ChainSync as ChainSyncT, ImportBlocksAction, Metrics, OnBlockData, - OnBlockJustification, OnStateData, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, - SyncState, SyncStatus, -}; -use sp_arithmetic::traits::Saturating; -use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; -use sp_consensus::{BlockOrigin, BlockStatus}; -use sp_runtime::{ - traits::{ - Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, - SaturatedConversion, Zero, - }, - EncodedJustification, Justifications, -}; - -use std::{ - collections::{HashMap, HashSet}, - ops::Range, - sync::Arc, -}; +//! Blockchain syncing implementation in Substrate. pub use service::chain_sync::SyncingService; +pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; mod block_announce_validator; +mod chain_sync; mod extra_requests; mod futures_stream; mod pending_responses; +mod request_metrics; mod schema; +mod types; pub mod block_relay_protocol; pub mod block_request_handler; @@ -88,3405 +40,3 @@ pub mod state; pub mod state_request_handler; pub mod warp; pub mod warp_request_handler; - -/// Log target for this file. -const LOG_TARGET: &'static str = "sync"; - -/// Maximum blocks to store in the import queue. -const MAX_IMPORTING_BLOCKS: usize = 2048; - -/// Maximum blocks to download ahead of any gap. -const MAX_DOWNLOAD_AHEAD: u32 = 2048; - -/// Maximum blocks to look backwards. The gap is the difference between the highest block and the -/// common block of a node. -const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; - -/// Pick the state to sync as the latest finalized number minus this. -const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; - -/// We use a heuristic that with a high likelihood, by the time -/// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same -/// chain as (or at least closer to) the peer so we want to delay -/// the ancestor search to not waste time doing that when we are -/// so far behind. -const MAJOR_SYNC_BLOCKS: u8 = 5; - -/// Number of peers that need to be connected before warp sync is started. -const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; - -/// Maximum blocks per response. -pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; - -mod rep { - use sc_network::ReputationChange as Rep; - /// Reputation change when a peer sent us a message that led to a - /// database read error. - pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); - - /// Reputation change when a peer sent us a status message with a different - /// genesis than us. - pub const GENESIS_MISMATCH: Rep = Rep::new(i32::MIN, "Genesis mismatch"); - - /// Reputation change for peers which send us a block with an incomplete header. 
- pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header");
-
- /// Reputation change for peers which send us a block which we fail to verify.
- pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed");
-
- /// Reputation change for peers which send us a known bad block.
- pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block");
-
- /// Peer did not provide us with advertised block data.
- pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data");
-
- /// Reputation change for peers which send us non-requested block data.
- pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data");
-
- /// Reputation change for peers which send us a block with bad justifications.
- pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification");
-
- /// Reputation change when a peer sent us an invalid ancestry result.
- pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error");
-
- /// Peer response data does not have requested bits.
- pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response");
-}
-
-enum AllowedRequests {
- Some(HashSet),
- All,
-}
-
-impl AllowedRequests {
- fn add(&mut self, id: &PeerId) {
- if let Self::Some(ref mut set) = self {
- set.insert(*id);
- }
- }
-
- fn take(&mut self) -> Self {
- std::mem::take(self)
- }
-
- fn set_all(&mut self) {
- *self = Self::All;
- }
-
- fn contains(&self, id: &PeerId) -> bool {
- match self {
- Self::Some(set) => set.contains(id),
- Self::All => true,
- }
- }
-
- fn is_empty(&self) -> bool {
- match self {
- Self::Some(set) => set.is_empty(),
- Self::All => false,
- }
- }
-
- fn clear(&mut self) {
- std::mem::take(self);
- }
-}
-
-impl Default for AllowedRequests {
- fn default() -> Self {
- Self::Some(HashSet::default())
- }
-}
-
-struct GapSync {
- blocks: BlockCollection,
- best_queued_number: NumberFor,
- target: NumberFor,
-}
-
-/// Action that [`engine::SyncingEngine`] should perform after reporting imported blocks with
-/// [`ChainSync::on_blocks_processed`].
-enum BlockRequestAction {
- /// Send block request to peer. Always implies dropping a stale block request to the same peer.
- SendRequest { peer_id: PeerId, request: BlockRequest },
- /// Drop stale block request.
- RemoveStale { peer_id: PeerId },
-}
-
-/// Action that [`engine::SyncingEngine`] should perform if we want to import justifications.
-struct ImportJustificationsAction {
- peer_id: PeerId,
- hash: B::Hash,
- number: NumberFor,
- justifications: Justifications,
-}
-
-/// Action that [`engine::SyncingEngine`] should perform on behalf of [`ChainSync`]
-/// after reporting block response with [`ChainSync::on_block_response`].
-enum OnBlockResponse {
- /// Nothing to do
- Nothing,
- /// Perform block request.
- SendBlockRequest { peer_id: PeerId, request: BlockRequest },
- /// Import blocks.
- ImportBlocks(ImportBlocksAction),
- /// Import justifications.
- ImportJustifications(ImportJustificationsAction),
-}
-
-/// The main data structure which contains all the state for a chain's
-/// active syncing strategy.
-pub struct ChainSync {
- /// Chain client.
- client: Arc,
- /// The active peers that we are using to sync and their PeerSync status
- peers: HashMap>,
- /// A `BlockCollection` of blocks that are being downloaded from peers
- blocks: BlockCollection,
- /// The best block number in our queue of blocks to import
- best_queued_number: NumberFor,
- /// The best block hash in our queue of blocks to import
- best_queued_hash: B::Hash,
- /// Current mode (full/light)
- mode: SyncMode,
- /// Any extra justification requests.
- extra_justifications: ExtraRequests,
- /// A set of hashes of blocks that are being downloaded or have been
- /// downloaded and are queued for import.
- queue_blocks: HashSet,
- /// Fork sync targets.
- fork_targets: HashMap>,
- /// A set of peers for which there might be potential block requests
- allowed_requests: AllowedRequests,
- /// Maximum number of peers to ask the same blocks in parallel.
- max_parallel_downloads: u32,
- /// Maximum blocks per request.
- max_blocks_per_request: u32,
- /// Total number of downloaded blocks.
- downloaded_blocks: usize,
- /// State sync in progress, if any.
- state_sync: Option>,
- /// Warp sync in progress, if any.
- warp_sync: Option>,
- /// Warp sync configuration.
- ///
- /// Will be `None` after `self.warp_sync` is `Some(_)`.
- warp_sync_config: Option>,
- /// A temporary storage for warp sync target block until warp sync is initialized.
- warp_sync_target_block_header: Option,
- /// Enable importing existing blocks. This is used after the state download to
- /// catch up to the latest state while re-importing blocks.
- import_existing: bool,
- /// Gap download process.
- gap_sync: Option>,
- /// Handle for communicating with `NetworkService`
- network_service: service::network::NetworkServiceHandle,
- /// Protocol name used for block announcements
- block_announce_protocol_name: ProtocolName,
-}
-
-/// All the data we have about a Peer that we are trying to sync with
-#[derive(Debug, Clone)]
-pub struct PeerSync {
- /// Peer id of this peer.
- pub peer_id: PeerId,
- /// The common number is the block number that is a common point of
- /// ancestry for both our chains (as far as we know).
- pub common_number: NumberFor,
- /// The hash of the best block that we've seen for this peer.
- pub best_hash: B::Hash,
- /// The number of the best block that we've seen for this peer.
- pub best_number: NumberFor,
- /// The state of syncing this peer is in for us, generally categorized
- /// into `Available` or "busy" with something as defined by `PeerSyncState`.
- pub state: PeerSyncState,
-}
-
-impl PeerSync {
- /// Update the `common_number` iff `new_common > common_number`.
- fn update_common_number(&mut self, new_common: NumberFor) {
- if self.common_number < new_common {
- trace!(
- target: LOG_TARGET,
- "Updating peer {} common number from={} => to={}.",
- self.peer_id,
- self.common_number,
- new_common,
- );
- self.common_number = new_common;
- }
- }
-}
-
-struct ForkTarget {
- number: NumberFor,
- parent_hash: Option,
- peers: HashSet,
-}
-
-/// The state of syncing between a Peer and ourselves.
-///
-/// Generally two categories, "busy" or `Available`. If busy, the enum
-/// defines what we are busy with.
-#[derive(Copy, Clone, Eq, PartialEq, Debug)]
-pub enum PeerSyncState {
- /// Available for sync requests.
- Available,
- /// Searching for ancestors the Peer has in common with us.
- AncestorSearch { start: NumberFor, current: NumberFor, state: AncestorSearchState },
- /// Actively downloading new blocks, starting from the given Number.
- DownloadingNew(NumberFor), - /// Downloading a stale block with given Hash. Stale means that it is a - /// block with a number that is lower than our best number. It might be - /// from a fork and not necessarily already imported. - DownloadingStale(B::Hash), - /// Downloading justification for given block hash. - DownloadingJustification(B::Hash), - /// Downloading state. - DownloadingState, - /// Downloading warp proof. - DownloadingWarpProof, - /// Downloading warp sync target block. - DownloadingWarpTargetBlock, - /// Actively downloading block history after warp sync. - DownloadingGap(NumberFor), -} - -impl PeerSyncState { - pub fn is_available(&self) -> bool { - matches!(self, Self::Available) - } -} - -impl ChainSyncT for ChainSync -where - B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, -{ - fn peer_info(&self, who: &PeerId) -> Option> { - self.peers - .get(who) - .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) - } - - /// Returns the current sync status. - fn status(&self) -> SyncStatus { - let median_seen = self.median_seen(); - let best_seen_block = - median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); - let sync_state = if let Some(target) = median_seen { - // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing - // if the same can be said about queued blocks. - let best_block = self.client.info().best_number; - if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { - // If target is not queued, we're downloading, otherwise importing. - if target > self.best_queued_number { - SyncState::Downloading { target } - } else { - SyncState::Importing { target } - } - } else { - SyncState::Idle - } - } else { - SyncState::Idle - }; - - let warp_sync_progress = match (&self.warp_sync, &self.mode, &self.gap_sync) { - (_, _, Some(gap_sync)) => Some(WarpSyncProgress { - phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), - total_bytes: 0, - }), - (None, SyncMode::Warp, _) => Some(WarpSyncProgress { - phase: WarpSyncPhase::AwaitingPeers { - required_peers: MIN_PEERS_TO_START_WARP_SYNC, - }, - total_bytes: 0, - }), - (Some(sync), _, _) => Some(sync.progress()), - _ => None, - }; - - SyncStatus { - state: sync_state, - best_seen_block, - num_peers: self.peers.len() as u32, - num_connected_peers: 0u32, - queued_blocks: self.queue_blocks.len() as u32, - state_sync: self.state_sync.as_ref().map(|s| s.progress()), - warp_sync: warp_sync_progress, - } - } - - fn num_sync_requests(&self) -> usize { - self.fork_targets - .values() - .filter(|f| f.number <= self.best_queued_number) - .count() - } - - fn num_downloaded_blocks(&self) -> usize { - self.downloaded_blocks - } - - fn num_peers(&self) -> usize { - self.peers.len() - } - - #[must_use] - fn new_peer( - &mut self, - who: PeerId, - best_hash: B::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer> { - // There is nothing sync can get from the node that has no blockchain data. 
- match self.block_status(&best_hash) { - Err(e) => { - debug!(target:LOG_TARGET, "Error reading blockchain: {e}"); - Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) - }, - Ok(BlockStatus::KnownBad) => { - info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); - Err(BadPeer(who, rep::BAD_BLOCK)) - }, - Ok(BlockStatus::Unknown) => { - if best_number.is_zero() { - info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)) - } - - // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have - // enough to do in the import queue that it's not worth kicking off - // an ancestor search, which is what we do in the next match case below. - if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { - debug!( - target:LOG_TARGET, - "New peer with unknown best hash {} ({}), assuming common block.", - self.best_queued_hash, - self.best_queued_number - ); - self.peers.insert( - who, - PeerSync { - peer_id: who, - common_number: self.best_queued_number, - best_hash, - best_number, - state: PeerSyncState::Available, - }, - ); - return Ok(None) - } - - // If we are at genesis, just start downloading. - let (state, req) = if self.best_queued_number.is_zero() { - debug!( - target:LOG_TARGET, - "New peer with best hash {best_hash} ({best_number}).", - ); - - (PeerSyncState::Available, None) - } else { - let common_best = std::cmp::min(self.best_queued_number, best_number); - - debug!( - target:LOG_TARGET, - "New peer with unknown best hash {} ({}), searching for common ancestor.", - best_hash, - best_number - ); - - ( - PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }, - Some(ancestry_request::(common_best)), - ) - }; - - self.allowed_requests.add(&who); - self.peers.insert( - who, - PeerSync { - peer_id: who, - common_number: Zero::zero(), - best_hash, - best_number, - state, - }, - ); - - if let SyncMode::Warp = self.mode { - if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() - { - log::debug!(target: LOG_TARGET, "Starting warp state sync."); - - if let Some(config) = self.warp_sync_config.take() { - let mut warp_sync = WarpSync::new(self.client.clone(), config); - if let Some(header) = self.warp_sync_target_block_header.take() { - warp_sync.set_target_block(header); - } - self.warp_sync = Some(warp_sync); - } - } - } - Ok(req) - }, - Ok(BlockStatus::Queued) | - Ok(BlockStatus::InChainWithState) | - Ok(BlockStatus::InChainPruned) => { - debug!( - target: LOG_TARGET, - "New peer with known best hash {best_hash} ({best_number}).", - ); - self.peers.insert( - who, - PeerSync { - peer_id: who, - common_number: std::cmp::min(self.best_queued_number, best_number), - best_hash, - best_number, - state: PeerSyncState::Available, - }, - ); - self.allowed_requests.add(&who); - Ok(None) - }, - } - } - - fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { - self.on_block_queued(best_hash, best_number); - } - - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_justifications - .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) - } - - fn clear_justification_requests(&mut self) { - self.extra_justifications.reset(); - } - - // The implementation is similar to `on_validated_block_announce` with unknown parent hash. 
- fn set_sync_fork_request( - &mut self, - mut peers: Vec, - hash: &B::Hash, - number: NumberFor, - ) { - if peers.is_empty() { - peers = self - .peers - .iter() - // Only request blocks from peers who are ahead or on a par. - .filter(|(_, peer)| peer.best_number >= number) - .map(|(id, _)| *id) - .collect(); - - debug!( - target: LOG_TARGET, - "Explicit sync request for block {hash:?} with no peers specified. \ - Syncing from these peers {peers:?} instead.", - ); - } else { - debug!( - target: LOG_TARGET, - "Explicit sync request for block {hash:?} with {peers:?}", - ); - } - - if self.is_known(hash) { - debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); - return - } - - trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); - for peer_id in &peers { - if let Some(peer) = self.peers.get_mut(peer_id) { - if let PeerSyncState::AncestorSearch { .. } = peer.state { - continue - } - - if number > peer.best_number { - peer.best_number = number; - peer.best_hash = *hash; - } - self.allowed_requests.add(peer_id); - } - } - - self.fork_targets - .entry(*hash) - .or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None }) - .peers - .extend(peers); - } - - #[must_use] - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer> { - self.downloaded_blocks += response.blocks.len(); - let mut gap = false; - let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { - let mut blocks = response.blocks; - if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { - trace!(target: LOG_TARGET, "Reversing incoming block list"); - blocks.reverse() - } - self.allowed_requests.add(who); - if let Some(request) = request { - match &mut peer.state { - PeerSyncState::DownloadingNew(_) => { - self.blocks.clear_peer_download(who); - peer.state = PeerSyncState::Available; - if let Some(start_block) = - validate_blocks::(&blocks, who, Some(request))? - { - self.blocks.insert(start_block, blocks, *who); - } - self.ready_blocks() - }, - PeerSyncState::DownloadingGap(_) => { - peer.state = PeerSyncState::Available; - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(who); - if let Some(start_block) = - validate_blocks::(&blocks, who, Some(request))? 
- { - gap_sync.blocks.insert(start_block, blocks, *who); - } - gap = true; - let blocks: Vec<_> = gap_sync - .blocks - .ready_blocks(gap_sync.best_queued_number + One::one()) - .into_iter() - .map(|block_data| { - let justifications = - block_data.block.justifications.or_else(|| { - legacy_justification_mapping( - block_data.block.justification, - ) - }); - IncomingBlock { - hash: block_data.block.hash, - header: block_data.block.header, - body: block_data.block.body, - indexed_body: block_data.block.indexed_body, - justifications, - origin: block_data.origin, - allow_missing_state: true, - import_existing: self.import_existing, - skip_execution: true, - state: None, - } - }) - .collect(); - debug!( - target: LOG_TARGET, - "Drained {} gap blocks from {}", - blocks.len(), - gap_sync.best_queued_number, - ); - blocks - } else { - debug!(target: LOG_TARGET, "Unexpected gap block response from {who}"); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } - }, - PeerSyncState::DownloadingStale(_) => { - peer.state = PeerSyncState::Available; - if blocks.is_empty() { - debug!(target: LOG_TARGET, "Empty block response from {who}"); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } - validate_blocks::(&blocks, who, Some(request))?; - blocks - .into_iter() - .map(|b| { - let justifications = b - .justifications - .or_else(|| legacy_justification_mapping(b.justification)); - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - indexed_body: None, - justifications, - origin: Some(*who), - allow_missing_state: true, - import_existing: self.import_existing, - skip_execution: self.skip_execution(), - state: None, - } - }) - .collect() - }, - PeerSyncState::AncestorSearch { current, start, state } => { - let matching_hash = match (blocks.get(0), self.client.hash(*current)) { - (Some(block), Ok(maybe_our_block_hash)) => { - trace!( - target: LOG_TARGET, - "Got ancestry block #{} ({}) from peer {}", - current, - block.hash, - who, - ); - maybe_our_block_hash.filter(|x| x == &block.hash) - }, - (None, _) => { - debug!( - target: LOG_TARGET, - "Invalid response when searching for ancestor from {who}", - ); - return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) - }, - (_, Err(e)) => { - info!( - target: LOG_TARGET, - "❌ Error answering legitimate blockchain query: {e}", - ); - return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) - }, - }; - if matching_hash.is_some() { - if *start < self.best_queued_number && - self.best_queued_number <= peer.best_number - { - // We've made progress on this chain since the search was started. - // Opportunistically set common number to updated number - // instead of the one that started the search. - peer.common_number = self.best_queued_number; - } else if peer.common_number < *current { - peer.common_number = *current; - } - } - if matching_hash.is_none() && current.is_zero() { - trace!( - target:LOG_TARGET, - "Ancestry search: genesis mismatch for peer {who}", - ); - return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) - } - if let Some((next_state, next_num)) = - handle_ancestor_search_state(state, *current, matching_hash.is_some()) - { - peer.state = PeerSyncState::AncestorSearch { - current: next_num, - start: *start, - state: next_state, - }; - return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))) - } else { - // Ancestry search is complete. Check if peer is on a stale fork unknown - // to us and add it to sync targets if necessary. - trace!( - target: LOG_TARGET, - "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", - self.best_queued_hash, - self.best_queued_number, - peer.best_hash, - peer.best_number, - matching_hash, - peer.common_number, - ); - if peer.common_number < peer.best_number && - peer.best_number < self.best_queued_number - { - trace!( - target: LOG_TARGET, - "Added fork target {} for {}", - peer.best_hash, - who, - ); - self.fork_targets - .entry(peer.best_hash) - .or_insert_with(|| ForkTarget { - number: peer.best_number, - parent_hash: None, - peers: Default::default(), - }) - .peers - .insert(*who); - } - peer.state = PeerSyncState::Available; - Vec::new() - } - }, - PeerSyncState::DownloadingWarpTargetBlock => { - peer.state = PeerSyncState::Available; - if let Some(warp_sync) = &mut self.warp_sync { - if blocks.len() == 1 { - validate_blocks::(&blocks, who, Some(request))?; - match warp_sync.import_target_block( - blocks.pop().expect("`blocks` len checked above."), - ) { - warp::TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), - warp::TargetBlockImportResult::BadResponse => - return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), - } - } else if blocks.is_empty() { - debug!(target: LOG_TARGET, "Empty block response from {who}"); - return Err(BadPeer(*who, rep::NO_BLOCK)) - } else { - debug!( - target: LOG_TARGET, - "Too many blocks ({}) in warp target block response from {}", - blocks.len(), - who, - ); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - } else { - debug!( - target: LOG_TARGET, - "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", - who, - ); - return Ok(OnBlockData::Continue) - } - }, - PeerSyncState::Available | - PeerSyncState::DownloadingJustification(..) | - PeerSyncState::DownloadingState | - PeerSyncState::DownloadingWarpProof => Vec::new(), - } - } else { - // When request.is_none() this is a block announcement. Just accept blocks. - validate_blocks::(&blocks, who, None)?; - blocks - .into_iter() - .map(|b| { - let justifications = b - .justifications - .or_else(|| legacy_justification_mapping(b.justification)); - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - indexed_body: None, - justifications, - origin: Some(*who), - allow_missing_state: true, - import_existing: false, - skip_execution: true, - state: None, - } - }) - .collect() - } - } else { - // We don't know of this peer, so we also did not request anything from it. 
- return Err(BadPeer(*who, rep::NOT_REQUESTED)) - }; - - Ok(OnBlockData::Import(self.validate_and_queue_blocks(new_blocks, gap))) - } - - #[must_use] - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer> { - let peer = if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!( - target: LOG_TARGET, - "💔 Called on_block_justification with a peer ID of an unknown peer", - ); - return Ok(OnBlockJustification::Nothing) - }; - - self.allowed_requests.add(&who); - if let PeerSyncState::DownloadingJustification(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one justification at a time - let justification = if let Some(block) = response.blocks.into_iter().next() { - if hash != block.hash { - warn!( - target: LOG_TARGET, - "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", - who, - hash, - block.hash, - ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) - } - - block - .justifications - .or_else(|| legacy_justification_mapping(block.justification)) - } else { - // we might have asked the peer for a justification on a block that we assumed it - // had but didn't (regardless of whether it had a justification for it or not). - trace!( - target: LOG_TARGET, - "Peer {who:?} provided empty response for justification request {hash:?}", - ); - - None - }; - - if let Some((peer_id, hash, number, justifications)) = - self.extra_justifications.on_response(who, justification) - { - return Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) - } - } - - Ok(OnBlockJustification::Nothing) - } - - fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications - .try_finalize_root((hash, number), finalization_result, true); - self.allowed_requests.set_all(); - } - - fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let SyncMode::LightState { skip_proofs, .. } = &self.mode { - if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { - // Finalized a recent block. 
- let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); - heads.sort(); - let median = heads[heads.len() / 2]; - if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { - if let Ok(Some(header)) = self.client.header(*hash) { - log::debug!( - target: LOG_TARGET, - "Starting state sync for #{number} ({hash})", - ); - self.state_sync = Some(StateSync::new( - self.client.clone(), - header, - None, - None, - *skip_proofs, - )); - self.allowed_requests.set_all(); - } - } - } - } - - if let Err(err) = r { - warn!( - target: LOG_TARGET, - "💔 Error cleaning up pending extra justification data requests: {err}", - ); - } - } - - fn on_validated_block_announce( - &mut self, - is_best: bool, - who: PeerId, - announce: &BlockAnnounce, - ) { - let number = *announce.header.number(); - let hash = announce.header.hash(); - let parent_status = - self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); - let known_parent = parent_status != BlockStatus::Unknown; - let ancient_parent = parent_status == BlockStatus::InChainPruned; - - let known = self.is_known(&hash); - let peer = if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID"); - return - }; - - if let PeerSyncState::AncestorSearch { .. } = peer.state { - trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", who); - return - } - - if is_best { - // update their best block - peer.best_number = number; - peer.best_hash = hash; - } - - // If the announced block is the best they have and is not ahead of us, our common number - // is either one further ahead or it's the one they just announced, if we know about it. - if is_best { - if known && self.best_queued_number >= number { - self.update_peer_common_number(&who, number); - } else if announce.header.parent_hash() == &self.best_queued_hash || - known_parent && self.best_queued_number >= number - { - self.update_peer_common_number(&who, number.saturating_sub(One::one())); - } - } - self.allowed_requests.add(&who); - - // known block case - if known || self.is_already_downloading(&hash) { - trace!(target: "sync", "Known block announce from {}: {}", who, hash); - if let Some(target) = self.fork_targets.get_mut(&hash) { - target.peers.insert(who); - } - return - } - - if ancient_parent { - trace!( - target: "sync", - "Ignored ancient block announced from {}: {} {:?}", - who, - hash, - announce.header, - ); - return - } - - if self.status().state == SyncState::Idle { - trace!( - target: "sync", - "Added sync target for block announced from {}: {} {:?}", - who, - hash, - announce.summary(), - ); - self.fork_targets - .entry(hash) - .or_insert_with(|| ForkTarget { - number, - parent_hash: Some(*announce.header.parent_hash()), - peers: Default::default(), - }) - .peers - .insert(who); - } - } - - #[must_use] - fn peer_disconnected(&mut self, who: &PeerId) -> Option> { - self.blocks.clear_peer_download(who); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_peer_download(who) - } - self.peers.remove(who); - self.extra_justifications.peer_disconnected(who); - self.allowed_requests.set_all(); - self.fork_targets.retain(|_, target| { - target.peers.remove(who); - !target.peers.is_empty() - }); - - let blocks = self.ready_blocks(); - - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) - } - - fn metrics(&self) -> Metrics { - Metrics { - queued_blocks: 
self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), - fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - justifications: self.extra_justifications.metrics(), - } - } -} - -impl ChainSync -where - Self: ChainSyncT, - B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, -{ - /// Create a new instance. - pub fn new( - mode: SyncMode, - client: Arc, - block_announce_protocol_name: ProtocolName, - max_parallel_downloads: u32, - max_blocks_per_request: u32, - warp_sync_config: Option>, - network_service: service::network::NetworkServiceHandle, - ) -> Result { - let mut sync = Self { - client, - peers: HashMap::new(), - blocks: BlockCollection::new(), - best_queued_hash: Default::default(), - best_queued_number: Zero::zero(), - extra_justifications: ExtraRequests::new("justification"), - mode, - queue_blocks: Default::default(), - fork_targets: Default::default(), - allowed_requests: Default::default(), - max_parallel_downloads, - max_blocks_per_request, - downloaded_blocks: 0, - state_sync: None, - warp_sync: None, - import_existing: false, - gap_sync: None, - network_service, - warp_sync_config, - warp_sync_target_block_header: None, - block_announce_protocol_name, - }; - - sync.reset_sync_start_point()?; - Ok(sync) - } - - /// Returns the median seen block number. - fn median_seen(&self) -> Option> { - let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); - - if best_seens.is_empty() { - None - } else { - let middle = best_seens.len() / 2; - - // Not the "perfect median" when we have an even number of peers. - Some(*best_seens.select_nth_unstable(middle).1) - } - } - - fn required_block_attributes(&self) -> BlockAttributes { - match self.mode { - SyncMode::Full => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, - SyncMode::LightState { storage_chain_mode: true, .. } => - BlockAttributes::HEADER | - BlockAttributes::JUSTIFICATION | - BlockAttributes::INDEXED_BODY, - } - } - - fn skip_execution(&self) -> bool { - match self.mode { - SyncMode::Full => false, - SyncMode::LightState { .. } => true, - SyncMode::Warp => true, - } - } - - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> ImportBlocksAction { - let orig_len = new_blocks.len(); - new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); - if new_blocks.len() != orig_len { - debug!( - target: LOG_TARGET, - "Ignoring {} blocks that are already queued", - orig_len - new_blocks.len(), - ); - } - - let origin = if !gap && !self.status().state.is_major_syncing() { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; - - if let Some((h, n)) = new_blocks - .last() - .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) - { - trace!( - target:LOG_TARGET, - "Accepted {} blocks ({:?}) with origin {:?}", - new_blocks.len(), - h, - origin, - ); - self.on_block_queued(h, n) - } - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - - ImportBlocksAction { origin, blocks: new_blocks } - } - - fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { - if let Some(peer) = self.peers.get_mut(peer_id) { - peer.update_common_number(new_common); - } - } - - /// Called when a block has been queued for import. 
- ///
- /// Updates our internal state for best queued block and then goes
- /// through all peers to update our view of their state as well.
- fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) {
- if self.fork_targets.remove(hash).is_some() {
- trace!(target: LOG_TARGET, "Completed fork sync {hash:?}");
- }
- if let Some(gap_sync) = &mut self.gap_sync {
- if number > gap_sync.best_queued_number && number <= gap_sync.target {
- gap_sync.best_queued_number = number;
- }
- }
- if number > self.best_queued_number {
- self.best_queued_number = number;
- self.best_queued_hash = *hash;
- // Update common blocks
- for (n, peer) in self.peers.iter_mut() {
- if let PeerSyncState::AncestorSearch { .. } = peer.state {
- // Wait for ancestry search to complete first.
- continue
- }
- let new_common_number =
- if peer.best_number >= number { number } else { peer.best_number };
- trace!(
- target: LOG_TARGET,
- "Updating peer {} info, ours={}, common={}->{}, their best={}",
- n,
- number,
- peer.common_number,
- new_common_number,
- peer.best_number,
- );
- peer.common_number = new_common_number;
- }
- }
- self.allowed_requests.set_all();
- }
-
- /// Restart the sync process. This will reset all pending block requests and return an iterator
- /// of new block requests to make to peers. Peers that were downloading finality data (i.e.
- /// their state was `DownloadingJustification`) are unaffected and will stay in the same state.
- fn restart(&mut self) -> impl Iterator, BadPeer>> + '_ {
- self.blocks.clear();
- if let Err(e) = self.reset_sync_start_point() {
- warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}");
- }
- self.allowed_requests.set_all();
- debug!(
- target: LOG_TARGET,
- "Restarted with {} ({})",
- self.best_queued_number,
- self.best_queued_hash,
- );
- let old_peers = std::mem::take(&mut self.peers);
-
- old_peers.into_iter().filter_map(move |(peer_id, mut p)| {
- // peers that were downloading justifications
- // should be kept in that state.
- if let PeerSyncState::DownloadingJustification(_) = p.state {
- // We make sure our common number is at least something we have.
- p.common_number = self.best_queued_number;
- self.peers.insert(peer_id, p);
- return None
- }
-
- // handle peers that were in other states.
- match self.new_peer(peer_id, p.best_hash, p.best_number) {
- // since the request is not a justification, remove it from pending responses
- Ok(None) => Some(Ok(BlockRequestAction::RemoveStale { peer_id })),
- // update the request if the new one is available
- Ok(Some(request)) => Some(Ok(BlockRequestAction::SendRequest { peer_id, request })),
- // this implies that we need to drop pending response from the peer
- Err(e) => Some(Err(e)),
- }
- })
- }
-
- /// Find a block to start sync from. If we sync with state, that's the latest block we have
- /// state for.
- fn reset_sync_start_point(&mut self) -> Result<(), ClientError> {
- let info = self.client.info();
- if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() {
- warn!(
- target: LOG_TARGET,
- "Can't use fast sync mode with a partially synced database. Reverting to full sync mode."
- );
- self.mode = SyncMode::Full;
- }
- if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() {
- warn!(
- target: LOG_TARGET,
- "Can't use warp sync mode with a partially synced database. Reverting to full sync mode."
- );
- self.mode = SyncMode::Full;
- }
- self.import_existing = false;
- self.best_queued_hash = info.best_hash;
- self.best_queued_number = info.best_number;
-
- if self.mode == SyncMode::Full &&
- self.client.block_status(info.best_hash)? != BlockStatus::InChainWithState
- {
- self.import_existing = true;
- // Latest state is missing, start with the last finalized state or genesis instead.
- if let Some((hash, number)) = info.finalized_state {
- debug!(target: LOG_TARGET, "Starting from finalized state #{number}");
- self.best_queued_hash = hash;
- self.best_queued_number = number;
- } else {
- debug!(target: LOG_TARGET, "Restarting from genesis");
- self.best_queued_hash = Default::default();
- self.best_queued_number = Zero::zero();
- }
- }
-
- if let Some((start, end)) = info.block_gap {
- debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}");
- self.gap_sync = Some(GapSync {
- best_queued_number: start - One::one(),
- target: end,
- blocks: BlockCollection::new(),
- });
- }
- trace!(
- target: LOG_TARGET,
- "Restarted sync at #{} ({:?})",
- self.best_queued_number,
- self.best_queued_hash,
- );
- Ok(())
- }
-
- /// What is the status of the block corresponding to the given hash?
- fn block_status(&self, hash: &B::Hash) -> Result {
- if self.queue_blocks.contains(hash) {
- return Ok(BlockStatus::Queued)
- }
- self.client.block_status(*hash)
- }
-
- /// Is the block corresponding to the given hash known?
- fn is_known(&self, hash: &B::Hash) -> bool {
- self.block_status(hash).ok().map_or(false, |s| s != BlockStatus::Unknown)
- }
-
- /// Is any peer downloading the given hash?
- fn is_already_downloading(&self, hash: &B::Hash) -> bool {
- self.peers
- .iter()
- .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash))
- }
-
- /// Is the peer known to the sync state machine?
- pub fn is_peer_known(&self, peer_id: &PeerId) -> bool {
- self.peers.contains_key(peer_id)
- }
-
- /// Get the set of downloaded blocks that are ready to be queued for import.
- fn ready_blocks(&mut self) -> Vec> {
- self.blocks
- .ready_blocks(self.best_queued_number + One::one())
- .into_iter()
- .map(|block_data| {
- let justifications = block_data
- .block
- .justifications
- .or_else(|| legacy_justification_mapping(block_data.block.justification));
- IncomingBlock {
- hash: block_data.block.hash,
- header: block_data.block.header,
- body: block_data.block.body,
- indexed_body: block_data.block.indexed_body,
- justifications,
- origin: block_data.origin,
- allow_missing_state: true,
- import_existing: self.import_existing,
- skip_execution: self.skip_execution(),
- state: None,
- }
- })
- .collect()
- }
-
- /// Set warp sync target block externally in case we skip warp proof downloading.
- pub fn set_warp_sync_target_block(&mut self, header: B::Header) {
- if let Some(ref mut warp_sync) = self.warp_sync {
- warp_sync.set_target_block(header);
- } else {
- self.warp_sync_target_block_header = Some(header);
- }
- }
-
- /// Generate block request for downloading of the target block body during warp sync.
- fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> {
- let sync = &self.warp_sync.as_ref()?;
-
- if self.allowed_requests.is_empty() ||
- sync.is_complete() ||
- self.peers
- .iter()
- .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock)
- {
- // Only one pending warp target block request is allowed.
- return None - } - - if let Some((target_number, request)) = sync.next_target_block_request() { - // Find a random peer that has a block with the target number. - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= target_number { - trace!(target: LOG_TARGET, "New warp target block request for {id}"); - peer.state = PeerSyncState::DownloadingWarpTargetBlock; - self.allowed_requests.clear(); - return Some((*id, request)) - } - } - } - - None - } - - /// Process blocks received in a response. - #[must_use] - pub(crate) fn on_block_response( - &mut self, - peer_id: PeerId, - request: BlockRequest, - blocks: Vec>, - ) -> OnBlockResponse { - let block_response = BlockResponse:: { id: request.id, blocks }; - - let blocks_range = || match ( - block_response - .blocks - .first() - .and_then(|b| b.header.as_ref().map(|h| h.number())), - block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - trace!( - target: LOG_TARGET, - "BlockResponse {} from {} with {} blocks {}", - block_response.id, - peer_id, - block_response.blocks.len(), - blocks_range(), - ); - - if request.fields == BlockAttributes::JUSTIFICATION { - match self.on_block_justification(peer_id, block_response) { - Ok(OnBlockJustification::Nothing) => OnBlockResponse::Nothing, - Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) => - OnBlockResponse::ImportJustifications(ImportJustificationsAction { - peer_id, - hash, - number, - justifications, - }), - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - OnBlockResponse::Nothing - }, - } - } else { - match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(action)) => OnBlockResponse::ImportBlocks(action), - Ok(OnBlockData::Request(peer_id, request)) => - OnBlockResponse::SendBlockRequest { peer_id, request }, - Ok(OnBlockData::Continue) => OnBlockResponse::Nothing, - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - OnBlockResponse::Nothing - }, - } - } - } - - /// Process state received in a response. 
- #[must_use] - pub fn on_state_response( - &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, - ) -> Option> { - match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - Some(ImportBlocksAction { origin, blocks: vec![block] }), - Ok(OnStateData::Continue) => None, - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - None - }, - } - } - - pub fn on_warp_sync_response(&mut self, peer_id: PeerId, response: EncodedProof) { - if let Err(BadPeer(id, repu)) = self.on_warp_sync_data(&peer_id, response) { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu); - } - } - - fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { - let peers = &mut self.peers; - let mut matcher = self.extra_justifications.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(peers) { - peers - .get_mut(&peer) - .expect( - "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", - ) - .state = PeerSyncState::DownloadingJustification(request.0); - let req = BlockRequest:: { - id: 0, - fields: BlockAttributes::JUSTIFICATION, - from: FromBlock::Hash(request.0), - direction: Direction::Ascending, - max: Some(1), - }; - Some((peer, req)) - } else { - None - } - }) - .collect() - } - - fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { - if self.mode == SyncMode::Warp { - return self - .warp_target_block_request() - .map_or_else(|| Vec::new(), |req| Vec::from([req])) - } - - if self.allowed_requests.is_empty() || self.state_sync.is_some() { - return Vec::new() - } - - if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { - trace!(target: LOG_TARGET, "Too many blocks in the queue."); - return Vec::new() - } - let is_major_syncing = self.status().state.is_major_syncing(); - let attrs = self.required_block_attributes(); - let blocks = &mut self.blocks; - let fork_targets = &mut self.fork_targets; - let last_finalized = - std::cmp::min(self.best_queued_number, self.client.info().finalized_number); - let best_queued = self.best_queued_number; - let client = &self.client; - let queue = &self.queue_blocks; - let allowed_requests = self.allowed_requests.take(); - let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; - let max_blocks_per_request = self.max_blocks_per_request; - let gap_sync = &mut self.gap_sync; - self.peers - .iter_mut() - .filter_map(move |(&id, peer)| { - if !peer.state.is_available() || !allowed_requests.contains(&id) { - return None - } - - // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from - // the common number, the peer best number is higher than our best queued and the - // common number is smaller than the last finalized block number, we should do an - // ancestor search to find a better common block. If the queue is full we wait till - // all blocks are imported though. - if best_queued.saturating_sub(peer.common_number) > - MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && - best_queued < peer.best_number && - peer.common_number < last_finalized && - queue.len() <= MAJOR_SYNC_BLOCKS.into() - { - trace!( - target: LOG_TARGET, - "Peer {:?} common block {} too far behind of our best {}. 
Starting ancestry search.", - id, - peer.common_number, - best_queued, - ); - let current = std::cmp::min(peer.best_number, best_queued); - peer.state = PeerSyncState::AncestorSearch { - current, - start: best_queued, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }; - Some((id, ancestry_request::(current))) - } else if let Some((range, req)) = peer_block_request( - &id, - peer, - blocks, - attrs, - max_parallel, - max_blocks_per_request, - last_finalized, - best_queued, - ) { - peer.state = PeerSyncState::DownloadingNew(range.start); - trace!( - target: LOG_TARGET, - "New block request for {}, (best:{}, common:{}) {:?}", - id, - peer.best_number, - peer.common_number, - req, - ); - Some((id, req)) - } else if let Some((hash, req)) = fork_sync_request( - &id, - fork_targets, - best_queued, - last_finalized, - attrs, - |hash| { - if queue.contains(hash) { - BlockStatus::Queued - } else { - client.block_status(*hash).unwrap_or(BlockStatus::Unknown) - } - }, - max_blocks_per_request, - ) { - trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}"); - peer.state = PeerSyncState::DownloadingStale(hash); - Some((id, req)) - } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { - peer_gap_block_request( - &id, - peer, - &mut sync.blocks, - attrs, - sync.target, - sync.best_queued_number, - max_blocks_per_request, - ) - }) { - peer.state = PeerSyncState::DownloadingGap(range.start); - trace!( - target: LOG_TARGET, - "New gap block request for {}, (best:{}, common:{}) {:?}", - id, - peer.best_number, - peer.common_number, - req, - ); - Some((id, req)) - } else { - None - } - }) - .collect() - } - - fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { - if self.allowed_requests.is_empty() { - return None - } - if (self.state_sync.is_some() || self.warp_sync.is_some()) && - self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) - { - // Only one pending state request is allowed. - return None - } - if let Some(sync) = &self.state_sync { - if sync.is_complete() { - return None - } - - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.common_number >= sync.target_block_num() { - peer.state = PeerSyncState::DownloadingState; - let request = sync.next_request(); - trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); - self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) - } - } - } - if let Some(sync) = &self.warp_sync { - if sync.is_complete() { - return None - } - if let (Some(request), Some(target)) = - (sync.next_state_request(), sync.target_block_number()) - { - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= target { - trace!(target: LOG_TARGET, "New StateRequest for {id}: {request:?}"); - peer.state = PeerSyncState::DownloadingState; - self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) - } - } - } - } - None - } - - fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { - if let Some(sync) = &self.warp_sync { - if self.allowed_requests.is_empty() || - sync.is_complete() || - self.peers - .iter() - .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof) - { - // Only one pending state request is allowed. 
- return None - } - if let Some(request) = sync.next_warp_proof_request() { - let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); - if !targets.is_empty() { - targets.sort(); - let median = targets[targets.len() / 2]; - // Find a random peer that is synced as much as peer majority. - for (id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= median { - trace!(target: LOG_TARGET, "New WarpProofRequest for {id}"); - peer.state = PeerSyncState::DownloadingWarpProof; - self.allowed_requests.clear(); - return Some((*id, request)) - } - } - } - } - } - None - } - - fn on_state_data( - &mut self, - who: &PeerId, - response: OpaqueStateResponse, - ) -> Result, BadPeer> { - let response: Box = response.0.downcast().map_err(|_error| { - error!( - target: LOG_TARGET, - "Failed to downcast opaque state response, this is an implementation bug." - ); - - BadPeer(*who, rep::BAD_RESPONSE) - })?; - - if let Some(peer) = self.peers.get_mut(who) { - if let PeerSyncState::DownloadingState = peer.state { - peer.state = PeerSyncState::Available; - self.allowed_requests.set_all(); - } - } - let import_result = if let Some(sync) = &mut self.state_sync { - debug!( - target: LOG_TARGET, - "Importing state data from {} with {} keys, {} proof nodes.", - who, - response.entries.len(), - response.proof.len(), - ); - sync.import(*response) - } else if let Some(sync) = &mut self.warp_sync { - debug!( - target: LOG_TARGET, - "Importing state data from {} with {} keys, {} proof nodes.", - who, - response.entries.len(), - response.proof.len(), - ); - sync.import_state(*response) - } else { - debug!(target: LOG_TARGET, "Ignored obsolete state response from {who}"); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - }; - - match import_result { - state::ImportResult::Import(hash, header, state, body, justifications) => { - let origin = BlockOrigin::NetworkInitialSync; - let block = IncomingBlock { - hash, - header: Some(header), - body, - indexed_body: None, - justifications, - origin: None, - allow_missing_state: true, - import_existing: true, - skip_execution: self.skip_execution(), - state: Some(state), - }; - debug!(target: LOG_TARGET, "State download is complete. Import is queued"); - Ok(OnStateData::Import(origin, block)) - }, - state::ImportResult::Continue => Ok(OnStateData::Continue), - state::ImportResult::BadResponse => { - debug!(target: LOG_TARGET, "Bad state data received from {who}"); - Err(BadPeer(*who, rep::BAD_BLOCK)) - }, - } - } - - fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer> { - if let Some(peer) = self.peers.get_mut(who) { - if let PeerSyncState::DownloadingWarpProof = peer.state { - peer.state = PeerSyncState::Available; - self.allowed_requests.set_all(); - } - } - let import_result = if let Some(sync) = &mut self.warp_sync { - debug!( - target: LOG_TARGET, - "Importing warp proof data from {}, {} bytes.", - who, - response.0.len(), - ); - sync.import_warp_proof(response) - } else { - debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {who}"); - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - }; - - match import_result { - WarpProofImportResult::Success => Ok(()), - WarpProofImportResult::BadResponse => { - debug!(target: LOG_TARGET, "Bad proof data received from {who}"); - Err(BadPeer(*who, rep::BAD_BLOCK)) - }, - } - } - - /// A batch of blocks have been processed, with or without errors. 
- /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. If an error is returned, the pending response - /// from the peer must be dropped. - #[must_use] - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> Box, BadPeer>>> { - trace!(target: LOG_TARGET, "Imported {imported} of {count}"); - - let mut output = Vec::new(); - - let mut has_error = false; - for (_, hash) in &results { - self.queue_blocks.remove(hash); - self.blocks.clear_queued(hash); - if let Some(gap_sync) = &mut self.gap_sync { - gap_sync.blocks.clear_queued(hash); - } - } - for (result, hash) in results { - if has_error { - break - } - - has_error |= result.is_err(); - - match result { - Ok(BlockImportStatus::ImportedKnown(number, who)) => - if let Some(peer) = who { - self.update_peer_common_number(&peer, number); - }, - Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { - if aux.clear_justification_requests { - trace!( - target: LOG_TARGET, - "Block imported clears all pending justification requests {number}: {hash:?}", - ); - self.clear_justification_requests(); - } - - if aux.needs_justification { - trace!( - target: LOG_TARGET, - "Block imported but requires justification {number}: {hash:?}", - ); - self.request_justification(&hash, number); - } - - if aux.bad_justification { - if let Some(ref peer) = who { - warn!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); - } - } - - if let Some(peer) = who { - self.update_peer_common_number(&peer, number); - } - let state_sync_complete = - self.state_sync.as_ref().map_or(false, |s| s.target() == hash); - if state_sync_complete { - info!( - target: LOG_TARGET, - "State sync is complete ({} MiB), restarting block sync.", - self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), - ); - self.state_sync = None; - self.mode = SyncMode::Full; - output.extend(self.restart()); - } - let warp_sync_complete = self - .warp_sync - .as_ref() - .map_or(false, |s| s.target_block_hash() == Some(hash)); - if warp_sync_complete { - info!( - target: LOG_TARGET, - "Warp sync is complete ({} MiB), restarting block sync.", - self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), - ); - self.warp_sync = None; - self.mode = SyncMode::Full; - output.extend(self.restart()); - } - let gap_sync_complete = - self.gap_sync.as_ref().map_or(false, |s| s.target == number); - if gap_sync_complete { - info!( - target: LOG_TARGET, - "Block history download is complete." 
- );
- self.gap_sync = None;
- }
- },
- Err(BlockImportError::IncompleteHeader(who)) =>
- if let Some(peer) = who {
- warn!(
- target: LOG_TARGET,
- "💔 Peer sent block with incomplete header to import",
- );
- output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER)));
- output.extend(self.restart());
- },
- Err(BlockImportError::VerificationFailed(who, e)) => {
- let extra_message =
- who.map_or_else(|| "".into(), |peer| format!(" received from ({peer})"));
-
- warn!(
- target: LOG_TARGET,
- "💔 Verification failed for block {hash:?}{extra_message}: {e:?}",
- );
-
- if let Some(peer) = who {
- output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL)));
- }
-
- output.extend(self.restart());
- },
- Err(BlockImportError::BadBlock(who)) =>
- if let Some(peer) = who {
- warn!(
- target: LOG_TARGET,
- "💔 Block {hash:?} received from peer {peer} has been blacklisted",
- );
- output.push(Err(BadPeer(peer, rep::BAD_BLOCK)));
- },
- Err(BlockImportError::MissingState) => {
- // This may happen if the chain we were requesting upon has been discarded
- // in the meantime because another chain has been finalized.
- // Don't mark it as bad as it still may be synced if explicitly requested.
- trace!(target: LOG_TARGET, "Obsolete block {hash:?}");
- },
- e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => {
- warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err());
- self.state_sync = None;
- self.warp_sync = None;
- output.extend(self.restart());
- },
- Err(BlockImportError::Cancelled) => {},
- };
- }
-
- self.allowed_requests.set_all();
- Box::new(output.into_iter())
- }
-}
-
-// This is purely during a backwards compatible transitionary period and should be removed
-// once we can assume all nodes can send and receive multiple Justifications
-// The ID tag is hardcoded here to avoid depending on the GRANDPA crate.
-// See: https://github.com/paritytech/substrate/issues/8172
-fn legacy_justification_mapping(
- justification: Option,
-) -> Option {
- justification.map(|just| (*b"FRNK", just).into())
-}
-
-/// Request the ancestry for a block. Sends a request for header and justification for the given
-/// block number. Used during ancestry search.
-fn ancestry_request(block: NumberFor) -> BlockRequest {
- BlockRequest:: {
- id: 0,
- fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION,
- from: FromBlock::Number(block),
- direction: Direction::Ascending,
- max: Some(1),
- }
-}
-
-/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using
-/// to try to find an ancestor block
-#[derive(Copy, Clone, Eq, PartialEq, Debug)]
-pub enum AncestorSearchState {
- /// Use exponential backoff to find an ancestor, then switch to binary search.
- /// We keep track of the exponent.
- ExponentialBackoff(NumberFor),
- /// Using binary search to find the best ancestor.
- /// We keep track of left and right bounds.
- BinarySearch(NumberFor, NumberFor),
-}
-
-/// This function handles the ancestor search strategy used. The goal is to find a common point
-/// that both our chains agree on that is as close to the tip as possible.
-/// The way this works is we first have an exponential backoff strategy, where we try to step
-/// backwards until we find a block hash match. The size of the step doubles each step we take.
-///
-/// When we've found a block hash match we then fall back to a binary search between the two
-/// last known points to find the common block closest to the tip.
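Concretely, the search described above can be sketched standalone. A minimal illustration (not part of the patch), with each chain modelled as a slice of u64 stand-in hashes indexed by block number; unlike `handle_ancestor_search_state`, which advances one probe per network round-trip, this sketch holds both chains locally:

fn common_ancestor(ours: &[u64], theirs: &[u64]) -> Option<usize> {
    let tip = ours.len().min(theirs.len()).checked_sub(1)?;
    // Exponential backoff: walk back from the tip, doubling the step,
    // until the two chains agree on some height.
    let (mut step, mut low, mut high) = (1, tip, tip + 1);
    while ours[low] != theirs[low] {
        if low == 0 {
            return None; // genesis mismatch
        }
        high = low;
        low = low.saturating_sub(step);
        step *= 2;
    }
    // Binary search between the match at `low` and the mismatch at `high`
    // for the highest height on which both chains agree.
    while high - low > 1 {
        let mid = low + (high - low) / 2;
        if ours[mid] == theirs[mid] {
            low = mid;
        } else {
            high = mid;
        }
    }
    Some(low)
}

fn main() {
    // Both chains share heights 0..=2 and then fork.
    let ours = [10, 11, 12, 20, 21, 22];
    let theirs = [10, 11, 12, 30, 31];
    assert_eq!(common_ancestor(&ours, &theirs), Some(2));
}

The real state machine performs the same walk one peer response at a time: `AncestorSearchState::ExponentialBackoff` carries the current step size and `BinarySearch` the two bounds, so each response advances the search by exactly one probe.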
-fn handle_ancestor_search_state( - state: &AncestorSearchState, - curr_block_num: NumberFor, - block_hash_match: bool, -) -> Option<(AncestorSearchState, NumberFor)> { - let two = >::one() + >::one(); - match state { - AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { - let next_distance_to_tip = *next_distance_to_tip; - if block_hash_match && next_distance_to_tip == One::one() { - // We found the ancestor in the first step so there is no need to execute binary - // search. - return None - } - if block_hash_match { - let left = curr_block_num; - let right = left + next_distance_to_tip / two; - let middle = left + (right - left) / two; - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } else { - let next_block_num = - curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or_else(Zero::zero); - let next_distance_to_tip = next_distance_to_tip * two; - Some(( - AncestorSearchState::ExponentialBackoff(next_distance_to_tip), - next_block_num, - )) - } - }, - AncestorSearchState::BinarySearch(mut left, mut right) => { - if left >= curr_block_num { - return None - } - if block_hash_match { - left = curr_block_num; - } else { - right = curr_block_num; - } - assert!(right >= left); - let middle = left + (right - left) / two; - if middle == curr_block_num { - None - } else { - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } - }, - } -} - -/// Get a new block request for the peer if any. -fn peer_block_request( - id: &PeerId, - peer: &PeerSync, - blocks: &mut BlockCollection, - attrs: BlockAttributes, - max_parallel_downloads: u32, - max_blocks_per_request: u32, - finalized: NumberFor, - best_num: NumberFor, -) -> Option<(Range>, BlockRequest)> { - if best_num >= peer.best_number { - // Will be downloaded as alternative fork instead. - return None - } else if peer.common_number < finalized { - trace!( - target: LOG_TARGET, - "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", - id, peer.common_number, finalized, peer.best_number, best_num, - ); - } - let range = blocks.needed_blocks( - *id, - max_blocks_per_request, - peer.best_number, - peer.common_number, - max_parallel_downloads, - MAX_DOWNLOAD_AHEAD, - )?; - - // The end is not part of the range. - let last = range.end.saturating_sub(One::one()); - - let from = if peer.best_number == last { - FromBlock::Hash(peer.best_hash) - } else { - FromBlock::Number(last) - }; - - let request = BlockRequest:: { - id: 0, - fields: attrs, - from, - direction: Direction::Descending, - max: Some((range.end - range.start).saturated_into::()), - }; - - Some((range, request)) -} - -/// Get a new block request for the peer if any. -fn peer_gap_block_request( - id: &PeerId, - peer: &PeerSync, - blocks: &mut BlockCollection, - attrs: BlockAttributes, - target: NumberFor, - common_number: NumberFor, - max_blocks_per_request: u32, -) -> Option<(Range>, BlockRequest)> { - let range = blocks.needed_blocks( - *id, - max_blocks_per_request, - std::cmp::min(peer.best_number, target), - common_number, - 1, - MAX_DOWNLOAD_AHEAD, - )?; - - // The end is not part of the range. - let last = range.end.saturating_sub(One::one()); - let from = FromBlock::Number(last); - - let request = BlockRequest:: { - id: 0, - fields: attrs, - from, - direction: Direction::Descending, - max: Some((range.end - range.start).saturated_into::()), - }; - Some((range, request)) -} - -/// Get pending fork sync targets for a peer. 
-fn fork_sync_request( - id: &PeerId, - targets: &mut HashMap>, - best_num: NumberFor, - finalized: NumberFor, - attributes: BlockAttributes, - check_block: impl Fn(&B::Hash) -> BlockStatus, - max_blocks_per_request: u32, -) -> Option<(B::Hash, BlockRequest)> { - targets.retain(|hash, r| { - if r.number <= finalized { - trace!( - target: LOG_TARGET, - "Removed expired fork sync request {:?} (#{})", - hash, - r.number, - ); - return false - } - if check_block(hash) != BlockStatus::Unknown { - trace!( - target: LOG_TARGET, - "Removed obsolete fork sync request {:?} (#{})", - hash, - r.number, - ); - return false - } - true - }); - for (hash, r) in targets { - if !r.peers.contains(&id) { - continue - } - // Download the fork only if it is behind or not too far ahead our tip of the chain - // Otherwise it should be downloaded in full sync mode. - if r.number <= best_num || - (r.number - best_num).saturated_into::() < max_blocks_per_request as u32 - { - let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); - let count = if parent_status == BlockStatus::Unknown { - (r.number - finalized).saturated_into::() // up to the last finalized block - } else { - // request only single block - 1 - }; - trace!( - target: LOG_TARGET, - "Downloading requested fork {hash:?} from {id}, {count} blocks", - ); - return Some(( - *hash, - BlockRequest:: { - id: 0, - fields: attributes, - from: FromBlock::Hash(*hash), - direction: Direction::Descending, - max: Some(count), - }, - )) - } else { - trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number); - } - } - None -} - -/// Returns `true` if the given `block` is a descendent of `base`. -fn is_descendent_of( - client: &T, - base: &Block::Hash, - block: &Block::Hash, -) -> sp_blockchain::Result -where - Block: BlockT, - T: HeaderMetadata + ?Sized, -{ - if base == block { - return Ok(false) - } - - let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; - - Ok(ancestor.hash == *base) -} - -/// Validate that the given `blocks` are correct. -/// Returns the number of the first block in the sequence. -/// -/// It is expected that `blocks` are in ascending order. -fn validate_blocks( - blocks: &Vec>, - who: &PeerId, - request: Option>, -) -> Result>, BadPeer> { - if let Some(request) = request { - if Some(blocks.len() as _) > request.max { - debug!( - target: LOG_TARGET, - "Received more blocks than requested from {}. Expected in maximum {:?}, got {}.", - who, - request.max, - blocks.len(), - ); - - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - - let block_header = - if request.direction == Direction::Descending { blocks.last() } else { blocks.first() } - .and_then(|b| b.header.as_ref()); - - let expected_block = block_header.as_ref().map_or(false, |h| match request.from { - FromBlock::Hash(hash) => h.hash() == hash, - FromBlock::Number(n) => h.number() == &n, - }); - - if !expected_block { - debug!( - target: LOG_TARGET, - "Received block that was not requested. 
Requested {:?}, got {:?}.", - request.from, - block_header, - ); - - return Err(BadPeer(*who, rep::NOT_REQUESTED)) - } - - if request.fields.contains(BlockAttributes::HEADER) && - blocks.iter().any(|b| b.header.is_none()) - { - trace!( - target: LOG_TARGET, - "Missing requested header for a block in response from {who}.", - ); - - return Err(BadPeer(*who, rep::BAD_RESPONSE)) - } - - if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) - { - trace!( - target: LOG_TARGET, - "Missing requested body for a block in response from {who}.", - ); - - return Err(BadPeer(*who, rep::BAD_RESPONSE)) - } - } - - for b in blocks { - if let Some(header) = &b.header { - let hash = header.hash(); - if hash != b.hash { - debug!( - target:LOG_TARGET, - "Bad header received from {}. Expected hash {:?}, got {:?}", - who, - b.hash, - hash, - ); - return Err(BadPeer(*who, rep::BAD_BLOCK)) - } - } - if let (Some(header), Some(body)) = (&b.header, &b.body) { - let expected = *header.extrinsics_root(); - let got = HashingFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - sp_runtime::StateVersion::V0, - ); - if expected != got { - debug!( - target:LOG_TARGET, - "Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}", - b.hash, - who, - expected, - got, - ); - return Err(BadPeer(*who, rep::BAD_BLOCK)) - } - } - } - - Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::service::network::NetworkServiceProvider; - use futures::executor::block_on; - use sc_block_builder::BlockBuilderProvider; - use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; - use sp_blockchain::HeaderBackend; - use substrate_test_runtime_client::{ - runtime::{Block, Hash, Header}, - BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, - TestClientBuilder, TestClientBuilderExt, - }; - - #[test] - fn processes_empty_response_on_justification_request_for_unknown_block() { - // if we ask for a justification for a given block to a peer that doesn't know that block - // (different from not having a justification), the peer will reply with an empty response. - // internally we should process the response as the justification not being available. 
-
-        let client = Arc::new(TestClientBuilder::new().build());
-        let peer_id = PeerId::random();
-
-        let (_chain_sync_network_provider, chain_sync_network_handle) =
-            NetworkServiceProvider::new();
-        let mut sync = ChainSync::new(
-            SyncMode::Full,
-            client.clone(),
-            ProtocolName::from("test-block-announce-protocol"),
-            1,
-            64,
-            None,
-            chain_sync_network_handle,
-        )
-        .unwrap();
-
-        let (a1_hash, a1_number) = {
-            let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block;
-            (a1.hash(), *a1.header.number())
-        };
-
-        // add a new peer with the same best block
-        sync.new_peer(peer_id, a1_hash, a1_number).unwrap();
-
-        // and request a justification for the block
-        sync.request_justification(&a1_hash, a1_number);
-
-        // the justification request should be scheduled to that peer
-        assert!(sync
-            .justification_requests()
-            .iter()
-            .any(|(who, request)| { *who == peer_id && request.from == FromBlock::Hash(a1_hash) }));
-
-        // there are no extra pending requests
-        assert_eq!(sync.extra_justifications.pending_requests().count(), 0);
-
-        // there's one in-flight extra request to the expected peer
-        assert!(sync.extra_justifications.active_requests().any(|(who, (hash, number))| {
-            *who == peer_id && *hash == a1_hash && *number == a1_number
-        }));
-
-        // if the peer replies with an empty response (i.e. it doesn't know the block),
-        // the active request should be cleared.
-        assert_eq!(
-            sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }),
-            Ok(OnBlockJustification::Nothing),
-        );
-
-        // there should be no in-flight requests
-        assert_eq!(sync.extra_justifications.active_requests().count(), 0);
-
-        // and the request should now be pending again, waiting for reschedule
-        assert!(sync
-            .extra_justifications
-            .pending_requests()
-            .any(|(hash, number)| { *hash == a1_hash && *number == a1_number }));
-    }
-
-    #[test]
-    fn restart_doesnt_affect_peers_downloading_finality_data() {
-        let mut client = Arc::new(TestClientBuilder::new().build());
-        let (_chain_sync_network_provider, chain_sync_network_handle) =
-            NetworkServiceProvider::new();
-
-        let mut sync = ChainSync::new(
-            SyncMode::Full,
-            client.clone(),
-            ProtocolName::from("test-block-announce-protocol"),
-            1,
-            64,
-            None,
-            chain_sync_network_handle,
-        )
-        .unwrap();
-
-        let peer_id1 = PeerId::random();
-        let peer_id2 = PeerId::random();
-        let peer_id3 = PeerId::random();
-
-        let mut new_blocks = |n| {
-            for _ in 0..n {
-                let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-                block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
-            }
-
-            let info = client.info();
-            (info.best_hash, info.best_number)
-        };
-
-        let (b1_hash, b1_number) = new_blocks(50);
-
-        // add 2 peers at blocks that we don't have locally
-        sync.new_peer(peer_id1, Hash::random(), 42).unwrap();
-        sync.new_peer(peer_id2, Hash::random(), 10).unwrap();
-
-        // we will send block requests to these peers
-        // for these blocks we don't know about
-        assert!(sync
-            .block_requests()
-            .into_iter()
-            .all(|(p, _)| { p == peer_id1 || p == peer_id2 }));
-
-        // add a new peer at a known block
-        sync.new_peer(peer_id3, b1_hash, b1_number).unwrap();
-
-        // we request a justification for a block we have locally
-        sync.request_justification(&b1_hash, b1_number);
-
-        // the justification request should be scheduled to the
-        // new peer which is at the given block
-        assert!(sync.justification_requests().iter().any(|(p, r)| {
-            *p == peer_id3 &&
-                r.fields == BlockAttributes::JUSTIFICATION &&
-                r.from ==
FromBlock::Hash(b1_hash)
-        }));
-
-        assert_eq!(
-            sync.peers.get(&peer_id3).unwrap().state,
-            PeerSyncState::DownloadingJustification(b1_hash),
-        );
-
-        // we restart the sync state
-        let block_requests = sync.restart();
-
-        // which should make us send out block requests to the first two peers
-        assert!(block_requests.map(|r| r.unwrap()).all(|event| match event {
-            BlockRequestAction::SendRequest { peer_id, .. } =>
-                peer_id == peer_id1 || peer_id == peer_id2,
-            BlockRequestAction::RemoveStale { .. } => false,
-        }));
-
-        // peer 3 should be unaffected as it was downloading finality data
-        assert_eq!(
-            sync.peers.get(&peer_id3).unwrap().state,
-            PeerSyncState::DownloadingJustification(b1_hash),
-        );
-
-        // Set common block to something that we don't have (e.g. failed import)
-        sync.peers.get_mut(&peer_id3).unwrap().common_number = 100;
-        let _ = sync.restart().count();
-        assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50);
-    }
-
-    /// Send a block announcement for the given `header`.
-    fn send_block_announce(
-        header: Header,
-        peer_id: PeerId,
-        sync: &mut ChainSync,
-    ) {
-        let announce = BlockAnnounce {
-            header: header.clone(),
-            state: Some(BlockState::Best),
-            data: Some(Vec::new()),
-        };
-
-        sync.on_validated_block_announce(true, peer_id, &announce);
-    }
-
-    /// Create a block response from the given `blocks`.
-    fn create_block_response(blocks: Vec) -> BlockResponse {
-        BlockResponse:: {
-            id: 0,
-            blocks: blocks
-                .into_iter()
-                .map(|b| BlockData:: {
-                    hash: b.hash(),
-                    header: Some(b.header().clone()),
-                    body: Some(b.deconstruct().1),
-                    indexed_body: None,
-                    receipt: None,
-                    message_queue: None,
-                    justification: None,
-                    justifications: None,
-                })
-                .collect(),
-        }
-    }
-
-    /// Get a block request from `sync` and check that it matches the expected request.
-    fn get_block_request(
-        sync: &mut ChainSync,
-        from: FromBlock,
-        max: u32,
-        peer: &PeerId,
-    ) -> BlockRequest {
-        let requests = sync.block_requests();
-
-        log::trace!(target: LOG_TARGET, "Requests: {requests:?}");
-
-        assert_eq!(1, requests.len());
-        assert_eq!(*peer, requests[0].0);
-
-        let request = requests[0].1.clone();
-
-        assert_eq!(from, request.from);
-        assert_eq!(Some(max), request.max);
-        request
-    }
-
-    /// Build and import a new best block.
-    fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block {
-        let at = at.unwrap_or_else(|| client.info().best_hash);
-
-        let mut block_builder = client.new_block_at(at, Default::default(), false).unwrap();
-
-        if fork {
-            block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap();
-        }
-
-        let block = block_builder.build().unwrap().block;
-
-        block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
-        block
-    }
-
-    /// This test is a regression test as observed on a real network.
-    ///
-    /// The node is connected to multiple peers. Both of these peers have a best block (1)
-    /// that is below our best block (3). Now peer 2 announces a fork of block 3 that we will
-    /// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4.
-    /// But as peer 1 in our view is still at block 1, we will request block 2 (which we already
-    /// have) from it. In the meantime peer 2 sends us blocks 4 and 3, and we send another request
-    /// for block 2 to peer 2. Peer 1 answers with block 2 and then peer 2 does as well. This needs
-    /// to succeed, as we have requested block 2 from both peers.
- #[test] - fn do_not_report_peer_on_block_response_for_block_request() { - sp_tracing::try_init_simple(); - - let mut client = Arc::new(TestClientBuilder::new().build()); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let peer_id2 = PeerId::random(); - - let mut client2 = client.clone(); - let mut build_block_at = |at, import| { - let mut block_builder = client2.new_block_at(at, Default::default(), false).unwrap(); - // Make sure we generate a different block as fork - block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); - - let block = block_builder.build().unwrap().block; - - if import { - block_on(client2.import(BlockOrigin::Own, block.clone())).unwrap(); - } - - block - }; - - let block1 = build_block(&mut client, None, false); - let block2 = build_block(&mut client, None, false); - let block3 = build_block(&mut client, None, false); - let block3_fork = build_block_at(block2.hash(), false); - - // Add two peers which are on block 1. - sync.new_peer(peer_id1, block1.hash(), 1).unwrap(); - sync.new_peer(peer_id2, block1.hash(), 1).unwrap(); - - // Tell sync that our best block is 3. - sync.update_chain_info(&block3.hash(), 3); - - // There should be no requests. - assert!(sync.block_requests().is_empty()); - - // Let peer2 announce a fork of block 3 - send_block_announce(block3_fork.header().clone(), peer_id2, &mut sync); - - // Import and tell sync that we now have the fork. - block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); - sync.update_chain_info(&block3_fork.hash(), 3); - - let block4 = build_block_at(block3_fork.hash(), false); - - // Let peer2 announce block 4 and check that sync wants to get the block. - send_block_announce(block4.header().clone(), peer_id2, &mut sync); - - let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); - - // Peer1 announces the same block, but as the common block is still `1`, sync will request - // block 2 again. - send_block_announce(block4.header().clone(), peer_id1, &mut sync); - - let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); - - let response = create_block_response(vec![block4.clone(), block3_fork.clone()]); - let res = sync.on_block_data(&peer_id2, Some(request), response).unwrap(); - - // We should not yet import the blocks, because there is still an open request for fetching - // block `2` which blocks the import. 
-        assert!(
-            matches!(res, OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty())
-        );
-
-        let request3 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id2);
-
-        let response = create_block_response(vec![block2.clone()]);
-        let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap();
-        assert!(matches!(
-            res,
-            OnBlockData::Import(ImportBlocksAction{ origin: _, blocks })
-                if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number()))
-        ));
-
-        let response = create_block_response(vec![block2.clone()]);
-        let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap();
-        // Nothing to import
-        assert!(
-            matches!(res, OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty())
-        );
-    }
-
-    fn unwrap_from_block_number(from: FromBlock) -> u64 {
-        if let FromBlock::Number(from) = from {
-            from
-        } else {
-            panic!("Expected a number!");
-        }
-    }
-
-    /// A regression test for a behavior we have seen on a live network.
-    ///
-    /// The scenario is that the node is doing a full resync and is connected to some node that is
-    /// doing a major sync as well. This other node that is doing a major sync will finish before
-    /// our node and send a block announcement message, but we haven't seen any block
-    /// announcement from this node in its sync process, meaning our common number didn't change.
-    /// It is now expected that we start an ancestor search to find the common number.
-    #[test]
-    fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
-        sp_tracing::try_init_simple();
-
-        let blocks = {
-            let mut client = Arc::new(TestClientBuilder::new().build());
-            (0..MAX_DOWNLOAD_AHEAD * 2)
-                .map(|_| build_block(&mut client, None, false))
-                .collect::>()
-        };
-
-        let mut client = Arc::new(TestClientBuilder::new().build());
-        let (_chain_sync_network_provider, chain_sync_network_handle) =
-            NetworkServiceProvider::new();
-        let info = client.info();
-
-        let mut sync = ChainSync::new(
-            SyncMode::Full,
-            client.clone(),
-            ProtocolName::from("test-block-announce-protocol"),
-            5,
-            64,
-            None,
-            chain_sync_network_handle,
-        )
-        .unwrap();
-
-        let peer_id1 = PeerId::random();
-        let peer_id2 = PeerId::random();
-
-        let best_block = blocks.last().unwrap().clone();
-        let max_blocks_to_request = sync.max_blocks_per_request;
-        // Connect the node we will sync from
-        sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number())
-            .unwrap();
-        sync.new_peer(peer_id2, info.best_hash, 0).unwrap();
-
-        let mut best_block_num = 0;
-        while best_block_num < MAX_DOWNLOAD_AHEAD {
-            let request = get_block_request(
-                &mut sync,
-                FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64),
-                max_blocks_to_request as u32,
-                &peer_id1,
-            );
-
-            let from = unwrap_from_block_number(request.from.clone());
-
-            let mut resp_blocks = blocks[best_block_num as usize..from as usize].to_vec();
-            resp_blocks.reverse();
-
-            let response = create_block_response(resp_blocks.clone());
-
-            let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap();
-            assert!(matches!(
-                res,
-                OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize
-            ),);
-
-            best_block_num += max_blocks_to_request as u32;
-
-            let _ = sync.on_blocks_processed(
-                max_blocks_to_request as usize,
-                max_blocks_to_request as usize,
-                resp_blocks
-                    .iter()
-                    .rev()
-                    .map(|b| {
-                        (
-                            Ok(BlockImportStatus::ImportedUnknown(
-                                *b.header().number(),
Default::default(),
-                                Some(peer_id1),
-                            )),
-                            b.hash(),
-                        )
-                    })
-                    .collect(),
-            );
-
-            resp_blocks
-                .into_iter()
-                .rev()
-                .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap());
-        }
-
-        // "Wait" for the queue to clear
-        sync.queue_blocks.clear();
-
-        // Let peer2 announce that it finished syncing
-        send_block_announce(best_block.header().clone(), peer_id2, &mut sync);
-
-        let (peer1_req, peer2_req) =
-            sync.block_requests().into_iter().fold((None, None), |res, req| {
-                if req.0 == peer_id1 {
-                    (Some(req.1), res.1)
-                } else if req.0 == peer_id2 {
-                    (res.0, Some(req.1))
-                } else {
-                    panic!("Unexpected req: {:?}", req)
-                }
-            });
-
-        // We should now do an ancestor search to find the correct common block.
-        let peer2_req = peer2_req.unwrap();
-        assert_eq!(Some(1), peer2_req.max);
-        assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from);
-
-        let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]);
-        let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap();
-        assert!(matches!(
-            res,
-            OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty()
-        ),);
-
-        let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from);
-
-        // As we are on the same chain, we should directly continue with requesting blocks from
-        // peer 2 as well.
-        get_block_request(
-            &mut sync,
-            FromBlock::Number(peer1_from + max_blocks_to_request as u64),
-            max_blocks_to_request as u32,
-            &peer_id2,
-        );
-    }
-
-    /// A test that ensures that we can sync a huge fork.
-    ///
-    /// The following scenario:
-    /// A peer connects to us and we both have the common block 512. The last finalized is 2048.
-    /// Our best block is 4096. The peer sends us a block announcement with 4097 from a fork.
-    ///
-    /// We will first do an ancestor search to find the common block.
After that we start to sync
-    /// the fork and finish it ;)
-    #[test]
-    fn can_sync_huge_fork() {
-        sp_tracing::try_init_simple();
-
-        let (_chain_sync_network_provider, chain_sync_network_handle) =
-            NetworkServiceProvider::new();
-        let mut client = Arc::new(TestClientBuilder::new().build());
-        let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4)
-            .map(|_| build_block(&mut client, None, false))
-            .collect::>();
-
-        let fork_blocks = {
-            let mut client = Arc::new(TestClientBuilder::new().build());
-            let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2]
-                .into_iter()
-                .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap())
-                .cloned()
-                .collect::>();
-
-            fork_blocks
-                .into_iter()
-                .chain(
-                    (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1)
-                        .map(|_| build_block(&mut client, None, true)),
-                )
-                .collect::>()
-        };
-
-        let info = client.info();
-
-        let mut sync = ChainSync::new(
-            SyncMode::Full,
-            client.clone(),
-            ProtocolName::from("test-block-announce-protocol"),
-            5,
-            64,
-            None,
-            chain_sync_network_handle,
-        )
-        .unwrap();
-
-        let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone();
-        let just = (*b"TEST", Vec::new());
-        client.finalize_block(finalized_block.hash(), Some(just)).unwrap();
-        sync.update_chain_info(&info.best_hash, info.best_number);
-
-        let peer_id1 = PeerId::random();
-
-        let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
-        // Connect the node we will sync from
-        sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number())
-            .unwrap();
-
-        send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync);
-
-        let mut request =
-            get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1);
-
-        // Do the ancestor search
-        loop {
-            let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1];
-            let response = create_block_response(vec![block.clone()]);
-
-            let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap();
-            request = if let OnBlockData::Request(_peer, request) = on_block_data {
-                request
-            } else {
-                // We found the ancestor
-                break
-            };
-
-            log::trace!(target: LOG_TARGET, "Request: {request:?}");
-        }
-
-        // Now request and import the fork.
- let mut best_block_num = *finalized_block.header().number() as u32; - let max_blocks_to_request = sync.max_blocks_per_request; - while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { - let request = get_block_request( - &mut sync, - FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64), - max_blocks_to_request as u32, - &peer_id1, - ); - - let from = unwrap_from_block_number(request.from.clone()); - - let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); - resp_blocks.reverse(); - - let response = create_block_response(resp_blocks.clone()); - - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == sync.max_blocks_per_request as usize - ),); - - best_block_num += sync.max_blocks_per_request as u32; - - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - resp_blocks - .iter() - .rev() - .map(|b| { - ( - Ok(BlockImportStatus::ImportedUnknown( - *b.header().number(), - Default::default(), - Some(peer_id1), - )), - b.hash(), - ) - }) - .collect(), - ); - - resp_blocks - .into_iter() - .rev() - .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); - } - - // Request the tip - get_block_request( - &mut sync, - FromBlock::Hash(fork_blocks.last().unwrap().hash()), - 1, - &peer_id1, - ); - } - - #[test] - fn syncs_fork_without_duplicate_requests() { - sp_tracing::try_init_simple(); - - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) - .map(|_| build_block(&mut client, None, false)) - .collect::>(); - - let fork_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); - let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] - .into_iter() - .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) - .cloned() - .collect::>(); - - fork_blocks - .into_iter() - .chain( - (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) - .map(|_| build_block(&mut client, None, true)), - ) - .collect::>() - }; - - let info = client.info(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); - let just = (*b"TEST", Vec::new()); - client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); - sync.update_chain_info(&info.best_hash, info.best_number); - - let peer_id1 = PeerId::random(); - - let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); - // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); - - send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); - - let mut request = - get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); - - // Do the ancestor search - loop { - let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; - let response = create_block_response(vec![block.clone()]); - - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, 
request) = on_block_data {
-                request
-            } else {
-                // We found the ancestor
-                break
-            };
-
-            log::trace!(target: LOG_TARGET, "Request: {request:?}");
-        }
-
-        // Now request and import the fork.
-        let mut best_block_num = *finalized_block.header().number() as u32;
-        let max_blocks_to_request = sync.max_blocks_per_request;
-
-        let mut request = get_block_request(
-            &mut sync,
-            FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64),
-            max_blocks_to_request as u32,
-            &peer_id1,
-        );
-        let last_block_num = *fork_blocks.last().unwrap().header().number() as u32 - 1;
-        while best_block_num < last_block_num {
-            let from = unwrap_from_block_number(request.from.clone());
-
-            let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec();
-            resp_blocks.reverse();
-
-            let response = create_block_response(resp_blocks.clone());
-
-            let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap();
-            assert!(matches!(
-                res,
-                OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize
-            ),);
-
-            best_block_num += max_blocks_to_request as u32;
-
-            if best_block_num < last_block_num {
-                // make sure we're not getting a duplicate request in the time before the blocks are
-                // processed
-                request = get_block_request(
-                    &mut sync,
-                    FromBlock::Number(max_blocks_to_request as u64 + best_block_num as u64),
-                    max_blocks_to_request as u32,
-                    &peer_id1,
-                );
-            }
-
-            let mut notify_imported: Vec<_> = resp_blocks
-                .iter()
-                .rev()
-                .map(|b| {
-                    (
-                        Ok(BlockImportStatus::ImportedUnknown(
-                            *b.header().number(),
-                            Default::default(),
-                            Some(peer_id1),
-                        )),
-                        b.hash(),
-                    )
-                })
-                .collect();
-
-            // The import queue may send notifications in batches of varying size. So we simulate
-            // this here by splitting the batch into 2 notifications.
- let max_blocks_to_request = sync.max_blocks_per_request; - let second_batch = notify_imported.split_off(notify_imported.len() / 2); - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - notify_imported, - ); - - let _ = sync.on_blocks_processed( - max_blocks_to_request as usize, - max_blocks_to_request as usize, - second_batch, - ); - - resp_blocks - .into_iter() - .rev() - .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); - } - - // Request the tip - get_block_request( - &mut sync, - FromBlock::Hash(fork_blocks.last().unwrap().hash()), - 1, - &peer_id1, - ); - } - - #[test] - fn removes_target_fork_on_disconnect() { - sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let common_block = blocks[1].clone(); - // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); - - // Create a "new" header and announce it - let mut header = blocks[0].header().clone(); - header.number = 4; - send_block_announce(header, peer_id1, &mut sync); - assert!(sync.fork_targets.len() == 1); - - let _ = sync.peer_disconnected(&peer_id1); - assert!(sync.fork_targets.len() == 0); - } - - #[test] - fn can_import_response_with_missing_blocks() { - sp_tracing::try_init_simple(); - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client2 = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); - - let empty_client = Arc::new(TestClientBuilder::new().build()); - - let mut sync = ChainSync::new( - SyncMode::Full, - empty_client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 1, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - let peer_id1 = PeerId::random(); - let best_block = blocks[3].clone(); - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); - - sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; - sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; - - // Request all missing blocks and respond only with some. - let request = - get_block_request(&mut sync, FromBlock::Hash(best_block.hash()), 4, &peer_id1); - let response = - create_block_response(vec![blocks[3].clone(), blocks[2].clone(), blocks[1].clone()]); - sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); - assert_eq!(sync.best_queued_number, 0); - - // Request should only contain the missing block. 
-        let request = get_block_request(&mut sync, FromBlock::Number(1), 1, &peer_id1);
-        let response = create_block_response(vec![blocks[0].clone()]);
-        sync.on_block_data(&peer_id1, Some(request), response).unwrap();
-        assert_eq!(sync.best_queued_number, 4);
-    }
-    #[test]
-    fn ancestor_search_repeat() {
-        let state = AncestorSearchState::::BinarySearch(1, 3);
-        assert!(handle_ancestor_search_state(&state, 2, true).is_none());
-    }
-
-    #[test]
-    fn sync_restart_removes_block_but_not_justification_requests() {
-        let mut client = Arc::new(TestClientBuilder::new().build());
-        let (_chain_sync_network_provider, chain_sync_network_handle) =
-            NetworkServiceProvider::new();
-        let mut sync = ChainSync::new(
-            SyncMode::Full,
-            client.clone(),
-            ProtocolName::from("test-block-announce-protocol"),
-            1,
-            64,
-            None,
-            chain_sync_network_handle,
-        )
-        .unwrap();
-
-        let peers = vec![PeerId::random(), PeerId::random()];
-
-        let mut new_blocks = |n| {
-            for _ in 0..n {
-                let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-                block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();
-            }
-
-            let info = client.info();
-            (info.best_hash, info.best_number)
-        };
-
-        let (b1_hash, b1_number) = new_blocks(50);
-
-        // add a new peer and request blocks from them
-        sync.new_peer(peers[0], Hash::random(), 42).unwrap();
-
-        // we don't actually perform any requests, just keep track of peers waiting for a response
-        let mut pending_responses = HashSet::new();
-
-        // we will send block requests to these peers
-        // for these blocks we don't know about
-        for (peer, _request) in sync.block_requests() {
-            // "send" request
-            pending_responses.insert(peer);
-        }
-
-        // add a new peer at a known block
-        sync.new_peer(peers[1], b1_hash, b1_number).unwrap();
-
-        // we request a justification for a block we have locally
-        sync.request_justification(&b1_hash, b1_number);
-
-        // the justification request should be scheduled to the
-        // new peer which is at the given block
-        let mut requests = sync.justification_requests();
-        assert_eq!(requests.len(), 1);
-        let (peer, _request) = requests.remove(0);
-        // "send" request
-        assert!(pending_responses.insert(peer));
-
-        assert!(!std::matches!(
-            sync.peers.get(&peers[0]).unwrap().state,
-            PeerSyncState::DownloadingJustification(_),
-        ));
-        assert_eq!(
-            sync.peers.get(&peers[1]).unwrap().state,
-            PeerSyncState::DownloadingJustification(b1_hash),
-        );
-        assert_eq!(pending_responses.len(), 2);
-
-        // restart sync
-        let request_events = sync.restart().collect::>();
-        for event in request_events.iter() {
-            match event.as_ref().unwrap() {
-                BlockRequestAction::RemoveStale { peer_id } => {
-                    pending_responses.remove(&peer_id);
-                },
-                BlockRequestAction::SendRequest { peer_id, .. } => {
-                    // we drop the obsolete response, but don't register a new request; it's checked
-                    // in the `assert!` below
-                    pending_responses.remove(&peer_id);
-                },
-            }
-        }
-        assert!(request_events.iter().any(|event| {
-            match event.as_ref().unwrap() {
-                BlockRequestAction::RemoveStale { .. } => false,
-                BlockRequestAction::SendRequest { peer_id, ..
} => peer_id == &peers[0], - } - })); - - assert_eq!(pending_responses.len(), 1); - assert!(pending_responses.contains(&peers[1])); - assert_eq!( - sync.peers.get(&peers[1]).unwrap().state, - PeerSyncState::DownloadingJustification(b1_hash), - ); - let _ = sync.peer_disconnected(&peers[1]); - pending_responses.remove(&peers[1]); - assert_eq!(pending_responses.len(), 0); - } - - /// The test demonstrates https://github.com/paritytech/polkadot-sdk/issues/2094. - /// TODO: convert it into desired behavior test once the issue is fixed (see inline comments). - /// The issue: we currently rely on block numbers instead of block hash - /// to download blocks from peers. As a result, we can end up with blocks - /// from different forks as shown by the test. - #[test] - #[should_panic] - fn request_across_forks() { - sp_tracing::try_init_simple(); - - let (_chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..100).map(|_| build_block(&mut client, None, false)).collect::>(); - - let fork_a_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); - let mut fork_blocks = blocks[..] - .into_iter() - .inspect(|b| { - assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); - block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() - }) - .cloned() - .collect::>(); - for _ in 0..10 { - fork_blocks.push(build_block(&mut client, None, false)); - } - fork_blocks - }; - - let fork_b_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); - let mut fork_blocks = blocks[..] - .into_iter() - .inspect(|b| { - assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_)))); - block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap() - }) - .cloned() - .collect::>(); - for _ in 0..10 { - fork_blocks.push(build_block(&mut client, None, true)); - } - fork_blocks - }; - - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - ProtocolName::from("test-block-announce-protocol"), - 5, - 64, - None, - chain_sync_network_handle, - ) - .unwrap(); - - // Add the peers, all at the common ancestor 100. - let common_block = blocks.last().unwrap(); - let peer_id1 = PeerId::random(); - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); - let peer_id2 = PeerId::random(); - sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()) - .unwrap(); - - // Peer 1 announces 107 from fork 1, 100-107 get downloaded. - { - let block = (&fork_a_blocks[106]).clone(); - let peer = peer_id1; - log::trace!(target: LOG_TARGET, "<1> {peer} announces from fork 1"); - send_block_announce(block.header().clone(), peer, &mut sync); - let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 7, &peer); - let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); - resp_blocks.reverse(); - let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 7_usize - ),); - assert_eq!(sync.best_queued_number, 107); - assert_eq!(sync.best_queued_hash, block.hash()); - assert!(sync.is_known(&block.header.parent_hash())); - } - - // Peer 2 also announces 107 from fork 1. 
- { - let prev_best_number = sync.best_queued_number; - let prev_best_hash = sync.best_queued_hash; - let peer = peer_id2; - log::trace!(target: LOG_TARGET, "<2> {peer} announces from fork 1"); - for i in 100..107 { - let block = (&fork_a_blocks[i]).clone(); - send_block_announce(block.header().clone(), peer, &mut sync); - assert!(sync.block_requests().is_empty()); - } - assert_eq!(sync.best_queued_number, prev_best_number); - assert_eq!(sync.best_queued_hash, prev_best_hash); - } - - // Peer 2 undergoes reorg, announces 108 from fork 2, gets downloaded even though we - // don't have the parent from fork 2. - { - let block = (&fork_b_blocks[107]).clone(); - let peer = peer_id2; - log::trace!(target: LOG_TARGET, "<3> {peer} announces from fork 2"); - send_block_announce(block.header().clone(), peer, &mut sync); - // TODO: when the issue is fixed, this test can be changed to test the - // expected behavior instead. The needed changes would be: - // 1. Remove the `#[should_panic]` directive - // 2. These should be changed to check that sync.block_requests().is_empty(), after the - // block is announced. - let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); - let response = create_block_response(vec![block.clone()]); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 1_usize - ),); - assert!(sync.is_known(&block.header.parent_hash())); - } - } -} diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs index ed7c647c7977..42220096e069 100644 --- a/substrate/client/network/sync/src/mock.rs +++ b/substrate/client/network/sync/src/mock.rs @@ -23,65 +23,8 @@ use crate::block_relay_protocol::{BlockDownloader as BlockDownloaderT, BlockResp use futures::channel::oneshot; use libp2p::PeerId; use sc_network::RequestFailure; -use sc_network_common::sync::{ - message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, - BadPeer, ChainSync as ChainSyncT, ImportBlocksAction, Metrics, OnBlockData, - OnBlockJustification, PeerInfo, SyncStatus, -}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; - -mockall::mock! 
{ - pub ChainSync {} - - impl ChainSyncT for ChainSync { - fn peer_info(&self, who: &PeerId) -> Option>; - fn status(&self) -> SyncStatus; - fn num_sync_requests(&self) -> usize; - fn num_downloaded_blocks(&self) -> usize; - fn num_peers(&self) -> usize; - fn new_peer( - &mut self, - who: PeerId, - best_hash: Block::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer>; - fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); - fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); - fn clear_justification_requests(&mut self); - fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &Block::Hash, - number: NumberFor, - ); - fn on_block_data( - &mut self, - who: &PeerId, - request: Option>, - response: BlockResponse, - ) -> Result, BadPeer>; - fn on_block_justification( - &mut self, - who: PeerId, - response: BlockResponse, - ) -> Result, BadPeer>; - fn on_justification_import( - &mut self, - hash: Block::Hash, - number: NumberFor, - success: bool, - ); - fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - fn on_validated_block_announce( - &mut self, - is_best: bool, - who: PeerId, - announce: &BlockAnnounce, - ); - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; - fn metrics(&self) -> Metrics; - } -} +use sc_network_common::sync::message::{BlockData, BlockRequest}; +use sp_runtime::traits::Block as BlockT; mockall::mock! { pub BlockDownloader {} diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs index c863267e7808..9e2fd5cfd674 100644 --- a/substrate/client/network/sync/src/pending_responses.rs +++ b/substrate/client/network/sync/src/pending_responses.rs @@ -19,6 +19,7 @@ //! [`PendingResponses`] is responsible for keeping track of pending responses and //! polling them. +use crate::types::PeerRequest; use futures::{ channel::oneshot, future::BoxFuture, @@ -28,11 +29,13 @@ use futures::{ use libp2p::PeerId; use log::error; use sc_network::request_responses::RequestFailure; -use sc_network_common::sync::PeerRequest; use sp_runtime::traits::Block as BlockT; use std::task::{Context, Poll}; use tokio_stream::StreamMap; +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + /// Response result. type ResponseResult = Result, RequestFailure>, oneshot::Canceled>; @@ -74,7 +77,7 @@ impl PendingResponses { .is_some() { error!( - target: crate::LOG_TARGET, + target: LOG_TARGET, "Discarded pending response from peer {peer_id}, request type: {request_type:?}.", ); debug_assert!(false); diff --git a/substrate/client/network/common/src/sync/metrics.rs b/substrate/client/network/sync/src/request_metrics.rs similarity index 100% rename from substrate/client/network/common/src/sync/metrics.rs rename to substrate/client/network/sync/src/request_metrics.rs diff --git a/substrate/client/network/sync/src/service/chain_sync.rs b/substrate/client/network/sync/src/service/chain_sync.rs index f9e0e401fdf8..3d11880c511c 100644 --- a/substrate/client/network/sync/src/service/chain_sync.rs +++ b/substrate/client/network/sync/src/service/chain_sync.rs @@ -16,14 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use crate::types::{ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider}; + use futures::{channel::oneshot, Stream}; use libp2p::PeerId; use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network::{NetworkBlock, NetworkSyncForkRequest}; -use sc_network_common::sync::{ - ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider, -}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; diff --git a/substrate/client/network/sync/src/state.rs b/substrate/client/network/sync/src/state.rs index 305f0ee6838a..5d34613d1c5e 100644 --- a/substrate/client/network/sync/src/state.rs +++ b/substrate/client/network/sync/src/state.rs @@ -18,12 +18,14 @@ //! State sync support. -use crate::schema::v1::{StateEntry, StateRequest, StateResponse}; +use crate::{ + schema::v1::{StateEntry, StateRequest, StateResponse}, + types::StateDownloadProgress, +}; use codec::{Decode, Encode}; use log::debug; use sc_client_api::{CompactProof, ProofProvider}; use sc_consensus::ImportedState; -use sc_network_common::sync::StateDownloadProgress; use smallvec::SmallVec; use sp_core::storage::well_known_keys; use sp_runtime::{ diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs new file mode 100644 index 000000000000..5931cf47b28a --- /dev/null +++ b/substrate/client/network/sync/src/types.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Common syncing types. + +use futures::Stream; +use sc_network_common::{role::Roles, types::ReputationChange}; + +use libp2p::PeerId; + +use crate::warp::WarpSyncProgress; +use sc_network_common::sync::message::BlockRequest; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; + +pub use sc_network_common::sync::SyncMode; + +/// The sync status of a peer we are trying to sync with +#[derive(Debug)] +pub struct PeerInfo { + /// Their best block hash. + pub best_hash: Block::Hash, + /// Their best block number. + pub best_number: NumberFor, +} + +/// Info about a peer's known state (both full and light). +#[derive(Clone, Debug)] +pub struct ExtendedPeerInfo { + /// Roles + pub roles: Roles, + /// Peer best block hash + pub best_hash: B::Hash, + /// Peer best block number + pub best_number: NumberFor, +} + +/// Reported sync state. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum SyncState { + /// Initial sync is complete, keep-up sync is active. + Idle, + /// Actively catching up with the chain. + Downloading { target: BlockNumber }, + /// All blocks are downloaded and are being imported. 
+ Importing { target: BlockNumber }, +} + +impl SyncState { + /// Are we actively catching up with the chain? + pub fn is_major_syncing(&self) -> bool { + !matches!(self, SyncState::Idle) + } +} + +/// Reported state download progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct StateDownloadProgress { + /// Estimated download percentage. + pub percentage: u32, + /// Total state size in bytes downloaded so far. + pub size: u64, +} + +/// Syncing status and statistics. +#[derive(Debug, Clone)] +pub struct SyncStatus { + /// Current global sync state. + pub state: SyncState>, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_peers: u32, + /// Number of peers known to `SyncingEngine` (both full and light). + pub num_connected_peers: u32, + /// Number of blocks queued for import + pub queued_blocks: u32, + /// State sync status in progress, if any. + pub state_sync: Option, + /// Warp sync in progress, if any. + pub warp_sync: Option>, +} + +/// A peer did not behave as expected and should be reported. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BadPeer(pub PeerId, pub ReputationChange); + +impl fmt::Display for BadPeer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) + } +} + +impl std::error::Error for BadPeer {} + +#[derive(Debug)] +pub struct Metrics { + pub queued_blocks: u32, + pub fork_targets: u32, + pub justifications: crate::request_metrics::Metrics, +} + +#[derive(Debug)] +pub enum PeerRequest { + Block(BlockRequest), + State, + WarpProof, +} + +#[derive(Debug)] +pub enum PeerRequestType { + Block, + State, + WarpProof, +} + +impl PeerRequest { + pub fn get_type(&self) -> PeerRequestType { + match self { + PeerRequest::Block(_) => PeerRequestType::Block, + PeerRequest::State => PeerRequestType::State, + PeerRequest::WarpProof => PeerRequestType::WarpProof, + } + } +} + +/// Wrapper for implementation-specific state request. +/// +/// NOTE: Implementation must be able to encode and decode it for network purposes. +pub struct OpaqueStateRequest(pub Box); + +impl fmt::Debug for OpaqueStateRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OpaqueStateRequest").finish() + } +} + +/// Wrapper for implementation-specific state response. +/// +/// NOTE: Implementation must be able to encode and decode it for network purposes. +pub struct OpaqueStateResponse(pub Box); + +impl fmt::Debug for OpaqueStateResponse { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OpaqueStateResponse").finish() + } +} + +/// Provides high-level status of syncing. +#[async_trait::async_trait] +pub trait SyncStatusProvider: Send + Sync { + /// Get high-level view of the syncing status. + async fn status(&self) -> Result, ()>; +} + +#[async_trait::async_trait] +impl SyncStatusProvider for Arc +where + T: ?Sized, + T: SyncStatusProvider, + Block: BlockT, +{ + async fn status(&self) -> Result, ()> { + T::status(self).await + } +} + +/// Syncing-related events that other protocols can subscribe to. +pub enum SyncEvent { + /// Peer that the syncing implementation is tracking connected. + PeerConnected(PeerId), + + /// Peer that the syncing implementation was tracking disconnected. + PeerDisconnected(PeerId), +} + +pub trait SyncEventStream: Send + Sync { + /// Subscribe to syncing-related events. 
+ fn event_stream(&self, name: &'static str) -> Pin + Send>>; +} + +impl SyncEventStream for Arc +where + T: ?Sized, + T: SyncEventStream, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + T::event_stream(self, name) + } +} diff --git a/substrate/client/network/sync/src/warp.rs b/substrate/client/network/sync/src/warp.rs index 74835a6e015e..2c0adc856c12 100644 --- a/substrate/client/network/sync/src/warp.rs +++ b/substrate/client/network/sync/src/warp.rs @@ -18,28 +18,107 @@ //! Warp sync support. +pub use sp_consensus_grandpa::{AuthorityList, SetId}; + use crate::{ schema::v1::{StateRequest, StateResponse}, state::{ImportResult, StateSync}, }; +use codec::{Decode, Encode}; use futures::channel::oneshot; use log::error; use sc_client_api::ProofProvider; -use sc_network_common::sync::{ - message::{BlockAttributes, BlockData, BlockRequest, Direction, FromBlock}, - warp::{ - EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, - WarpSyncProvider, - }, +use sc_network_common::sync::message::{ + BlockAttributes, BlockData, BlockRequest, Direction, FromBlock, }; use sp_blockchain::HeaderBackend; -use sp_consensus_grandpa::{AuthorityList, SetId}; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; -use std::sync::Arc; +use std::{fmt, sync::Arc}; /// Log target for this file. const LOG_TARGET: &'static str = "sync"; +/// Scale-encoded warp sync proof response. +pub struct EncodedProof(pub Vec); + +/// Warp sync request +#[derive(Encode, Decode, Debug)] +pub struct WarpProofRequest { + /// Start collecting proofs from this block. + pub begin: B::Hash, +} + +/// Proof verification result. +pub enum VerificationResult { + /// Proof is valid, but the target was not reached. + Partial(SetId, AuthorityList, Block::Hash), + /// Target finality is proved. + Complete(SetId, AuthorityList, Block::Header), +} + +/// Warp sync backend. Handles retrieving and verifying warp sync proofs. +pub trait WarpSyncProvider: Send + Sync { + /// Generate proof starting at given block hash. The proof is accumulated until maximum proof + /// size is reached. + fn generate( + &self, + start: Block::Hash, + ) -> Result>; + /// Verify warp proof against current set of authorities. + fn verify( + &self, + proof: &EncodedProof, + set_id: SetId, + authorities: AuthorityList, + ) -> Result, Box>; + /// Get current list of authorities. This is supposed to be genesis authorities when starting + /// sync. + fn current_authorities(&self) -> AuthorityList; +} + +/// Reported warp sync phase. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum WarpSyncPhase { + /// Waiting for peers to connect. + AwaitingPeers { required_peers: usize }, + /// Waiting for target block to be received. + AwaitingTargetBlock, + /// Downloading and verifying grandpa warp proofs. + DownloadingWarpProofs, + /// Downloading target block. + DownloadingTargetBlock, + /// Downloading state data. + DownloadingState, + /// Importing state. + ImportingState, + /// Downloading block history. 
+ DownloadingBlocks(NumberFor), +} + +impl fmt::Display for WarpSyncPhase { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::AwaitingPeers { required_peers } => + write!(f, "Waiting for {required_peers} peers to be connected"), + Self::AwaitingTargetBlock => write!(f, "Waiting for target block to be received"), + Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), + Self::DownloadingTargetBlock => write!(f, "Downloading target block"), + Self::DownloadingState => write!(f, "Downloading state"), + Self::ImportingState => write!(f, "Importing state"), + Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), + } + } +} + +/// Reported warp sync progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct WarpSyncProgress { + /// Estimated download percentage. + pub phase: WarpSyncPhase, + /// Total bytes downloaded so far. + pub total_bytes: u64, +} + /// The different types of warp syncing, passed to `build_network`. pub enum WarpSyncParams { /// Standard warp sync for the chain. diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs index 0e502a6dba59..b23f30c50dd2 100644 --- a/substrate/client/network/sync/src/warp_request_handler.rs +++ b/substrate/client/network/sync/src/warp_request_handler.rs @@ -20,13 +20,13 @@ use codec::Decode; use futures::{channel::oneshot, stream::StreamExt}; use log::debug; +use crate::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider}; use sc_network::{ config::ProtocolId, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, }; -use sc_network_common::sync::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider}; use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 11505903e35d..ad4201f94a20 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -60,16 +60,15 @@ use sc_network::{ Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, NetworkWorker, }; -use sc_network_common::{ - role::Roles, - sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, -}; +use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, service::{chain_sync::SyncingService, network::NetworkServiceProvider}, state_request_handler::StateRequestHandler, - warp::WarpSyncParams, + warp::{ + AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider, + }, warp_request_handler, }; use sc_service::client::Client; diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 5e42465974bb..2a6aa4b3a40a 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -21,6 +21,7 @@ log = "0.4.17" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-network = { path = ".." 
} sc-network-common = { path = "../common" } +sc-network-sync = { path = "../sync" } sc-utils = { path = "../../utils" } sp-runtime = { path = "../../../primitives/runtime" } sp-consensus = { path = "../../../primitives/consensus/common" } diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index b46733d42723..1b97d4b96c97 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -42,11 +42,8 @@ use sc_network::{ utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, }; -use sc_network_common::{ - role::ObservedRole, - sync::{SyncEvent, SyncEventStream}, - ExHashT, -}; +use sc_network_common::{role::ObservedRole, ExHashT}; +use sc_network_sync::{SyncEvent, SyncEventStream}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; diff --git a/substrate/client/service/src/metrics.rs b/substrate/client/service/src/metrics.rs index ece5758be771..a411a83a784e 100644 --- a/substrate/client/service/src/metrics.rs +++ b/substrate/client/service/src/metrics.rs @@ -23,7 +23,7 @@ use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; use sc_network::{config::Role, NetworkStatus, NetworkStatusProvider}; -use sc_network_common::sync::{SyncStatus, SyncStatusProvider}; +use sc_network_sync::{SyncStatus, SyncStatusProvider}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; From f50054cffba4df6b25a16dafdbc66d582e419355 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 1 Nov 2023 14:37:09 +0100 Subject: [PATCH 56/69] [ci] Update rust nightly in ci image (#2115) Run CI using new image with nightly 2023-11-01 cc https://github.com/paritytech/polkadot-sdk/issues/2113 cc https://github.com/paritytech/ci_cd/issues/896 --- .github/workflows/fmt-check.yml | 2 +- .gitlab-ci.yml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index 55e67f2799b8..7ca4413bb050 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -14,7 +14,7 @@ jobs: os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} container: - image: paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706 + image: paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231025 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 835b668de259..6dc7fc1a3cdb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,8 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + # CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231025" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" From 00b85c51dfbc0fecbb8a4dd3635d4c177a6527a6 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 1 Nov 2023 15:21:44 +0100 Subject: [PATCH 57/69] [NPoS] Paging reward payouts in order to scale rewardable nominators (#1189) helps 
https://github.com/paritytech/polkadot-sdk/issues/439. closes https://github.com/paritytech/polkadot-sdk/issues/473. PR link in the older substrate repository: https://github.com/paritytech/substrate/pull/13498.

# Context
Reward payouts are processed today in a single block and are limited to `MaxNominatorRewardedPerValidator`. This number is currently 512 on both Kusama and Polkadot. This PR scales nominator payouts to an unlimited count in a multi-block fashion. Exposures are stored in pages, with each page capped to a certain number (`MaxExposurePageSize`). Starting out, this number would be the same as `MaxNominatorRewardedPerValidator`, but eventually it can be lowered through new runtime upgrades to limit the rewardable nominators per dispatched call. The changes in the PR are backward compatible.

## How payouts work after this change
Staking exposes two calls: 1) the existing `payout_stakers` and 2) `payout_stakers_by_page`.

### payout_stakers
This remains backward compatible with no signature change. If a validator has multiple pages for a given era, the caller can invoke `payout_stakers` multiple times. The pages are executed in ascending sequence and the runtime takes care of preventing double claims.

### payout_stakers_by_page
Very similar to `payout_stakers`, but it also accepts an extra param `page_index`. An account can choose to pay out rewards only for an explicitly passed `page_index`.

**Let's look at an example scenario**
Suppose an active validator on Kusama has 1100 nominators and `MaxExposurePageSize` is set to 512 for era `e`. In order to pay out rewards to all nominators, the caller would need to call `payout_stakers` 3 times.
- `payout_stakers(origin, stash, e)` => will pay the first 512 nominators.
- `payout_stakers(origin, stash, e)` => will pay the second set of 512 nominators.
- `payout_stakers(origin, stash, e)` => will pay the last set of 76 nominators.
...
- `payout_stakers(origin, stash, e)` => calling it a 4th time would return the error `InvalidPage`.

The above calls can also be replaced by `payout_stakers_by_page`, passing a `page_index` explicitly.

## Commission note
Validator commission is paid out in chunks across all the pages, where each commission chunk is proportional to the total stake of the current page. This implies that the higher the total stake of a page, the higher its commission chunk. If all the pages of a validator for a single era are paid out, the sum of commission paid to the validator across all pages should equal what the commission would have been with a non-paged exposure.

### Migration Note
Strictly speaking, we did not need to bump our storage version since there is no migration of storage in this PR. But it is still useful to mark a storage upgrade for the following reasons:
- New storage items are introduced in this PR while some older storage items are deprecated.
- For the next `HistoryDepth` eras, the exposure would be incrementally migrated to its corresponding paged storage item.
- Runtimes using the staking pallet would strictly need to wait at least `HistoryDepth` eras on the current upgraded version (14) for the migration to complete. At some era `E` such that `E > era_at_which_V14_gets_into_effect + HistoryDepth`, we will upgrade to version X which will remove the deprecated storage items. In other words, it is a strict requirement that `Ex - E14 > HistoryDepth`, where `Ex` = the era at which the deprecated storage items are removed from the runtime, and `E14` = the era at which the runtime is upgraded to version 14. A small numeric sketch of this constraint follows below.
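  As a rough numeric sketch of this constraint (a hypothetical helper, not code from this PR; the era numbers are made up, and `HistoryDepth = 84` matches the cleanup window referenced below):

  ```rust
  /// Hypothetical illustration only: mirrors the constraint `Ex - E14 > HistoryDepth`.
  fn deprecated_storage_removable(current_era: u32, v14_era: u32, history_depth: u32) -> bool {
      current_era.saturating_sub(v14_era) > history_depth
  }

  fn main() {
      // With V14 enacted at era 1_000 and HistoryDepth = 84, removing the
      // deprecated items is only safe from era 1_085 onwards.
      assert!(!deprecated_storage_removable(1_084, 1_000, 84));
      assert!(deprecated_storage_removable(1_085, 1_000, 84));
  }
  ```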
- For Polkadot and Kusama, there is a [tracker ticket](https://github.com/paritytech/polkadot-sdk/issues/433) to clean up the deprecated storage items. ### Storage Changes #### Added - ErasStakersOverview - ClaimedRewards - ErasStakersPaged #### Deprecated The following can be cleaned up after 84 eras which is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/433). - ErasStakers. - ErasStakersClipped. - StakingLedger.claimed_rewards, renamed to StakingLedger.legacy_claimed_rewards. ### Config Changes - Renamed MaxNominatorRewardedPerValidator to MaxExposurePageSize. ### TODO - [x] Tracker ticket for cleaning up the old code after 84 eras. - [x] Add companion. - [x] Redo benchmarks before merge. - [x] Add Changelog for pallet_staking. - [x] Pallet should be configurable to enable/disable paged rewards. - [x] Commission payouts are distributed across pages. - [x] Review documentation thoroughly. - [x] Rename `MaxNominatorRewardedPerValidator` -> `MaxExposurePageSize`. - [x] NMap for `ErasStakersPaged`. - [x] Deprecate ErasStakers. - [x] Integrity tests. ### Followup issues [Runtime api for deprecated ErasStakers storage item](https://github.com/paritytech/polkadot-sdk/issues/426) --------- Co-authored-by: Javier Viola Co-authored-by: Ross Bulat Co-authored-by: command-bot <> --- Cargo.lock | 1 + polkadot/runtime/test-runtime/src/lib.rs | 9 +- polkadot/runtime/westend/src/lib.rs | 23 +- .../westend/src/weights/pallet_staking.rs | 1055 ++++----- prdoc/pr_1289.prdoc | 28 + substrate/bin/node/runtime/src/lib.rs | 16 +- substrate/frame/babe/src/mock.rs | 2 +- substrate/frame/babe/src/tests.rs | 8 +- substrate/frame/beefy/src/mock.rs | 2 +- substrate/frame/beefy/src/tests.rs | 12 +- .../test-staking-e2e/src/mock.rs | 5 +- .../frame/fast-unstake/src/benchmarking.rs | 4 +- substrate/frame/fast-unstake/src/lib.rs | 4 - substrate/frame/fast-unstake/src/mock.rs | 4 +- substrate/frame/grandpa/src/mock.rs | 2 +- substrate/frame/grandpa/src/tests.rs | 12 +- .../nomination-pools/benchmarking/src/mock.rs | 2 +- substrate/frame/nomination-pools/src/mock.rs | 5 + .../nomination-pools/test-staking/src/mock.rs | 2 +- .../frame/offences/benchmarking/src/mock.rs | 2 +- substrate/frame/root-offences/src/lib.rs | 2 +- substrate/frame/root-offences/src/mock.rs | 2 +- .../frame/session/benchmarking/src/mock.rs | 2 +- substrate/frame/staking/CHANGELOG.md | 27 + substrate/frame/staking/README.md | 16 +- .../frame/staking/runtime-api/Cargo.toml | 5 +- .../frame/staking/runtime-api/src/lib.rs | 6 +- substrate/frame/staking/src/benchmarking.rs | 22 +- substrate/frame/staking/src/ledger.rs | 19 +- substrate/frame/staking/src/lib.rs | 301 ++- substrate/frame/staking/src/migrations.rs | 50 +- substrate/frame/staking/src/mock.rs | 18 +- substrate/frame/staking/src/pallet/impls.rs | 222 +- substrate/frame/staking/src/pallet/mod.rs | 185 +- substrate/frame/staking/src/tests.rs | 1403 ++++++++--- substrate/frame/staking/src/weights.rs | 2051 +++++++++-------- substrate/primitives/staking/src/lib.rs | 133 +- 37 files changed, 3474 insertions(+), 2188 deletions(-) create mode 100644 prdoc/pr_1289.prdoc create mode 100644 substrate/frame/staking/CHANGELOG.md diff --git a/Cargo.lock b/Cargo.lock index 0f386c52b384..fb49533a7f94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10619,6 +10619,7 @@ version = "4.0.0-dev" dependencies = [ "parity-scale-codec", "sp-api", + "sp-staking", ] [[package]] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 888477366d47..596e65eca068 
100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -192,7 +192,7 @@ impl pallet_babe::Config for Runtime {
 	type WeightInfo = ();
 	type MaxAuthorities = MaxAuthorities;
-	type MaxNominators = MaxNominatorRewardedPerValidator;
+	type MaxNominators = MaxNominators;
 	type KeyOwnerProof =
 		<Historical as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::Proof;
@@ -318,7 +318,8 @@ parameter_types! {
 	// 27 eras in which slashes can be cancelled (a bit less than 7 days).
 	pub storage SlashDeferDuration: sp_staking::EraIndex = 27;
 	pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
-	pub storage MaxNominatorRewardedPerValidator: u32 = 64;
+	pub const MaxExposurePageSize: u32 = 64;
+	pub const MaxNominators: u32 = 256;
 	pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17);
 	pub const MaxAuthorities: u32 = 100_000;
 	pub const OnChainMaxWinners: u32 = u32::MAX;
@@ -354,7 +355,7 @@ impl pallet_staking::Config for Runtime {
 	type AdminOrigin = frame_system::EnsureNever<()>;
 	type SessionInterface = Self;
 	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
-	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
+	type MaxExposurePageSize = MaxExposurePageSize;
 	type OffendingValidatorsThreshold = OffendingValidatorsThreshold;
 	type NextNewSession = Session;
 	type ElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
@@ -380,7 +381,7 @@ impl pallet_grandpa::Config for Runtime {
 	type WeightInfo = ();
 	type MaxAuthorities = MaxAuthorities;
-	type MaxNominators = MaxNominatorRewardedPerValidator;
+	type MaxNominators = MaxNominators;
 	type MaxSetIdSessionEntries = MaxSetIdSessionEntries;
 	type KeyOwnerProof = sp_core::Void;
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index b8b2e540e96b..9dfc3389d574 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -255,7 +255,7 @@ impl pallet_babe::Config for Runtime {
 	type WeightInfo = ();
 	type MaxAuthorities = MaxAuthorities;
-	type MaxNominators = MaxNominatorRewardedPerValidator;
+	type MaxNominators = MaxNominators;
 	type KeyOwnerProof =
 		<Historical as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::Proof;
@@ -306,7 +306,7 @@ parameter_types! {
 impl pallet_beefy::Config for Runtime {
 	type BeefyId = BeefyId;
 	type MaxAuthorities = MaxAuthorities;
-	type MaxNominators = MaxNominatorRewardedPerValidator;
+	type MaxNominators = MaxNominators;
 	type MaxSetIdSessionEntries = BeefySetIdSessionEntries;
 	type OnNewValidatorSet = BeefyMmrLeaf;
 	type WeightInfo = ();
@@ -645,7 +645,11 @@ parameter_types! {
 	// 1 era in which slashes can be cancelled (6 hours).
 	pub const SlashDeferDuration: sp_staking::EraIndex = 1;
 	pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
-	pub const MaxNominatorRewardedPerValidator: u32 = 64;
+	pub const MaxExposurePageSize: u32 = 64;
+	// Note: this is not really correct as Max Nominators is (MaxExposurePageSize * page_count) but
+	// this is an unbounded number. We just set it to a reasonably high value, 1 full page
+	// of nominators.
+	pub const MaxNominators: u32 = 64;
 	pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17);
 	pub const MaxNominations: u32 = <NposCompactSolution16 as frame_election_provider_support::NposSolution>::LIMIT as u32;
 }
@@ -665,7 +669,7 @@ impl pallet_staking::Config for Runtime {
 	type AdminOrigin = EnsureRoot<AccountId>;
 	type SessionInterface = Self;
 	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
-	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
+	type MaxExposurePageSize = MaxExposurePageSize;
 	type OffendingValidatorsThreshold = OffendingValidatorsThreshold;
 	type NextNewSession = Session;
 	type ElectionProvider = ElectionProviderMultiPhase;
@@ -688,8 +692,6 @@ impl pallet_fast_unstake::Config for Runtime {
 	type ControlOrigin = EnsureRoot<AccountId>;
 	type Staking = Staking;
 	type MaxErasToCheckPerBlock = ConstU32<1>;
-	#[cfg(feature = "runtime-benchmarks")]
-	type MaxBackersPerValidator = MaxNominatorRewardedPerValidator;
 	type WeightInfo = weights::pallet_fast_unstake::WeightInfo<Runtime>;
 }
@@ -788,7 +790,7 @@ impl pallet_grandpa::Config for Runtime {
 	type WeightInfo = ();
 	type MaxAuthorities = MaxAuthorities;
-	type MaxNominators = MaxNominatorRewardedPerValidator;
+	type MaxNominators = MaxNominators;
 	type MaxSetIdSessionEntries = MaxSetIdSessionEntries;
 	type KeyOwnerProof =
 		<Historical as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
@@ -1547,6 +1549,7 @@ pub mod migrations {
 	pub type Unreleased = (
 		pallet_im_online::migration::v1::Migration<Runtime>,
 		parachains_configuration::migration::v7::MigrateToV7<Runtime>,
+		pallet_staking::migrations::v14::MigrateToV14<Runtime>,
 		assigned_slots::migration::v1::VersionCheckedMigrateToV1<Runtime>,
 		parachains_scheduler::migration::v1::MigrateToV1<Runtime>,
 		parachains_configuration::migration::v8::MigrateToV8<Runtime>,
@@ -2100,10 +2103,14 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
-	impl pallet_staking_runtime_api::StakingApi<Block, Balance> for Runtime {
+	impl pallet_staking_runtime_api::StakingApi<Block, Balance, AccountId> for Runtime {
 		fn nominations_quota(balance: Balance) -> u32 {
 			Staking::api_nominations_quota(balance)
 		}
+
+		fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page {
+			Staking::api_eras_stakers_page_count(era, account)
+		}
 	}
 
 	#[cfg(feature = "try-runtime")]
diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs
index 099693d26b50..3c4542c6d6fe 100644
--- a/polkadot/runtime/westend/src/weights/pallet_staking.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs
@@ -17,27 +17,25 @@
 //! Autogenerated weights for `pallet_staking`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
-// --chain=westend-dev
 // --steps=50
 // --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=pallet_staking
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_staking
+// --chain=westend-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,746 +48,755 @@ use core::marker::PhantomData;
 
 /// Weight functions for `pallet_staking`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
-	/// Storage: Staking Bonded (r:1 w:1)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:0 w:1)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
+	/// Storage: `Staking::Bonded` (r:1 w:1)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:0 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:0 w:1)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn bond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1014`
+		//  Measured:  `894`
 		//  Estimated: `4764`
-		// Minimum execution time: 51_108_000 picoseconds.
-		Weight::from_parts(52_521_000, 0)
+		// Minimum execution time: 39_950_000 picoseconds.
+ Weight::from_parts(41_107_000, 0) .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `1959` + // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 96_564_000 picoseconds. - Weight::from_parts(100_133_000, 0) + // Minimum execution time: 83_828_000 picoseconds. 
+ Weight::from_parts(85_733_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2166` + // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 97_705_000 picoseconds. - Weight::from_parts(102_055_000, 0) + // Minimum execution time: 89_002_000 picoseconds. 
+ Weight::from_parts(91_556_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `981` + // Measured: `1075` // Estimated: `4764` - // Minimum execution time: 45_257_000 picoseconds. - Weight::from_parts(47_309_508, 0) + // Minimum execution time: 40_839_000 picoseconds. 
+ Weight::from_parts(42_122_428, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_343 - .saturating_add(Weight::from_parts(61_484, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4)) + // Standard Error: 884 + .saturating_add(Weight::from_parts(46_036, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, 
`max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2221 + s * (4 ±0)` + // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 94_800_000 picoseconds. - Weight::from_parts(101_763_223, 0) + // Minimum execution time: 84_244_000 picoseconds. + Weight::from_parts(91_199_964, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 6_481 - .saturating_add(Weight::from_parts(1_450_372, 0).saturating_mul(s.into())) + // Standard Error: 3_381 + .saturating_add(Weight::from_parts(1_327_289, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:1 w:0) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:1 w:0) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// 
Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:1) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1343` + // Measured: `1301` // Estimated: `4556` - // Minimum execution time: 57_763_000 picoseconds. - Weight::from_parts(59_394_000, 0) + // Minimum execution time: 49_693_000 picoseconds. + Weight::from_parts(50_814_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:128 w:128) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1248 + k * (569 ±0)` + // Measured: `1243 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 35_501_000 picoseconds. - Weight::from_parts(32_260_525, 0) + // Minimum execution time: 29_140_000 picoseconds. 
+ Weight::from_parts(28_309_627, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 34_554 - .saturating_add(Weight::from_parts(10_625_386, 0).saturating_mul(k.into())) + // Standard Error: 5_780 + .saturating_add(Weight::from_parts(6_509_869, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:17 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, 
mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1839 + n * (102 ±0)` + // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 67_970_000 picoseconds. - Weight::from_parts(65_110_939, 0) + // Minimum execution time: 61_377_000 picoseconds. + Weight::from_parts(58_805_232, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 32_193 - .saturating_add(Weight::from_parts(4_688_614, 0).saturating_mul(n.into())) + // Standard Error: 14_197 + .saturating_add(Weight::from_parts(4_090_197, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1675` + // Measured: `1581` // Estimated: `6248` - // Minimum execution time: 59_515_000 picoseconds. - Weight::from_parts(62_462_000, 0) + // Minimum execution time: 52_736_000 picoseconds. + Weight::from_parts(54_573_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `771` + // Measured: `865` // Estimated: `4556` - // Minimum execution time: 13_943_000 picoseconds. - Weight::from_parts(14_384_000, 0) + // Minimum execution time: 16_496_000 picoseconds. + Weight::from_parts(17_045_000, 0) .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:2) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `8122` - // Minimum execution time: 21_212_000 picoseconds. - Weight::from_parts(22_061_000, 0) - .saturating_add(Weight::from_parts(0, 8122)) - .saturating_add(T::DbWeight::get().reads(3)) + // Measured: `865` + // Estimated: `4556` + // Minimum execution time: 19_339_000 picoseconds. + Weight::from_parts(20_187_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Staking ValidatorCount (r:0 w:1) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_validator_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_977_000 picoseconds. 
- Weight::from_parts(3_217_000, 0) + // Minimum execution time: 2_340_000 picoseconds. + Weight::from_parts(2_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_no_eras() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_152_000 picoseconds. - Weight::from_parts(9_949_000, 0) + // Minimum execution time: 7_483_000 picoseconds. + Weight::from_parts(8_101_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_509_000 picoseconds. - Weight::from_parts(9_838_000, 0) + // Minimum execution time: 7_773_000 picoseconds. + Weight::from_parts(8_610_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era_always() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_480_000 picoseconds. - Weight::from_parts(9_755_000, 0) + // Minimum execution time: 7_577_000 picoseconds. + Weight::from_parts(7_937_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking Invulnerables (r:0 w:1) - /// Proof Skipped: Staking Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_140_000 picoseconds. - Weight::from_parts(3_438_665, 0) + // Minimum execution time: 2_522_000 picoseconds. 
+ Weight::from_parts(2_735_307, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 93 - .saturating_add(Weight::from_parts(15_688, 0).saturating_mul(v.into())) + // Standard Error: 38 + .saturating_add(Weight::from_parts(10_553, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: 
`Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1947 + s * (4 ±0)` + // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_729_000 picoseconds. - Weight::from_parts(93_633_668, 0) + // Minimum execution time: 82_547_000 picoseconds. + Weight::from_parts(89_373_781, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 6_522 - .saturating_add(Weight::from_parts(1_421_038, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(12)) + // Standard Error: 3_589 + .saturating_add(Weight::from_parts(1_258_878, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking UnappliedSlashes (r:1 w:1) - /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66606` - // Estimated: `70071` - // Minimum execution time: 135_155_000 picoseconds. - Weight::from_parts(960_317_735, 0) - .saturating_add(Weight::from_parts(0, 70071)) - // Standard Error: 59_264 - .saturating_add(Weight::from_parts(4_884_888, 0).saturating_mul(s.into())) + // Measured: `66639` + // Estimated: `70104` + // Minimum execution time: 134_619_000 picoseconds. 
+ Weight::from_parts(1_194_949_665, 0) + .saturating_add(Weight::from_parts(0, 70104)) + // Standard Error: 76_719 + .saturating_add(Weight::from_parts(6_455_953, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:65 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:65 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:65 w:65) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:66 w:66) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:65 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 64]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `5773 + n * (151 ±0)` - // Estimated: `8579 + n * (2603 ±0)` - // Minimum execution time: 92_788_000 picoseconds. - Weight::from_parts(129_527_249, 0) - .saturating_add(Weight::from_parts(0, 8579)) - // Standard Error: 73_346 - .saturating_add(Weight::from_parts(33_413_624, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `6895 + n * (156 ±0)` + // Estimated: `9802 + n * (2603 ±0)` + // Minimum execution time: 114_338_000 picoseconds. + Weight::from_parts(138_518_124, 0) + .saturating_add(Weight::from_parts(0, 9802)) + // Standard Error: 53_621 + .saturating_add(Weight::from_parts(25_676_781, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(5)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:65 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:65 w:65) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:65 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:65 w:65) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:65 w:65) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:65 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersClipped` (r:1 w:0) + /// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: 
`Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:65 w:65) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:65 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:65 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 64]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `8056 + n * (396 ±0)` - // Estimated: `10634 + n * (3774 ±0)` - // Minimum execution time: 118_795_000 picoseconds. - Weight::from_parts(181_663_036, 0) - .saturating_add(Weight::from_parts(0, 10634)) - // Standard Error: 132_894 - .saturating_add(Weight::from_parts(51_369_596, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11)) + // Measured: `8249 + n * (396 ±0)` + // Estimated: `10779 + n * (3774 ±3)` + // Minimum execution time: 132_719_000 picoseconds. 
+ Weight::from_parts(170_505_880, 0) + .saturating_add(Weight::from_parts(0, 10779)) + // Standard Error: 32_527 + .saturating_add(Weight::from_parts(42_453_136, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1960 + l * (5 ±0)` + // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 88_870_000 picoseconds. - Weight::from_parts(92_783_195, 0) + // Minimum execution time: 78_438_000 picoseconds. 
+ Weight::from_parts(81_774_734, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 7_412 - .saturating_add(Weight::from_parts(49_785, 0).saturating_mul(l.into())) + // Standard Error: 3_706 + .saturating_add(Weight::from_parts(51_358, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), 
added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2221 + s * (4 ±0)` + // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 102_112_000 picoseconds. - Weight::from_parts(103_755_459, 0) + // Minimum execution time: 92_129_000 picoseconds. + Weight::from_parts(94_137_611, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 6_107 - .saturating_add(Weight::from_parts(1_436_139, 0).saturating_mul(s.into())) + // Standard Error: 4_141 + .saturating_add(Weight::from_parts(1_283_823, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:178 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:110 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:110 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:11 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:110 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:110 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinimumValidatorCount (r:1 w:0) - /// Proof: Staking MinimumValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:1) - /// Proof: Staking CurrentEra (max_values: 
Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:0 w:10) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:0 w:10) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:0 w:10) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasTotalStake (r:0 w:1) - /// Proof: Staking ErasTotalStake (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:178 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:20) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 547_465_000 picoseconds. - Weight::from_parts(557_541_000, 0) + // Minimum execution time: 527_896_000 picoseconds. + Weight::from_parts(533_325_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_380_806 - .saturating_add(Weight::from_parts(78_379_807, 0).saturating_mul(v.into())) - // Standard Error: 237_234 - .saturating_add(Weight::from_parts(22_772_283, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(185)) + // Standard Error: 2_064_813 + .saturating_add(Weight::from_parts(68_484_503, 0).saturating_mul(v.into())) + // Standard Error: 205_747 + .saturating_add(Weight::from_parts(18_833_735, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(8)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:178 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2000 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:2000 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1000 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2000 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2000 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:178 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, 
mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2000 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2000 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2000 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:2000 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1000 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3151 + n * (907 ±0) + v * (391 ±0)` + // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 39_710_080_000 picoseconds. - Weight::from_parts(42_191_823_000, 0) + // Minimum execution time: 35_302_472_000 picoseconds. + Weight::from_parts(35_651_169_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 506_609 - .saturating_add(Weight::from_parts(7_688_462, 0).saturating_mul(v.into())) - // Standard Error: 506_609 - .saturating_add(Weight::from_parts(6_303_908, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(180)) + // Standard Error: 412_098 + .saturating_add(Weight::from_parts(5_172_265, 0).saturating_mul(v.into())) + // Standard Error: 412_098 + .saturating_add(Weight::from_parts(4_142_772, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1001 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. 
fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `951 + v * (50 ±0)` + // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_603_304_000 picoseconds. - Weight::from_parts(481_860_383, 0) + // Minimum execution time: 2_522_650_000 picoseconds. + Weight::from_parts(97_022_833, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 55_189 - .saturating_add(Weight::from_parts(4_786_173, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 6_751 + .saturating_add(Weight::from_parts(4_990_018, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_set() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_453_000 picoseconds. - Weight::from_parts(6_857_000, 0) + // Minimum execution time: 3_833_000 picoseconds. 
+ Weight::from_parts(4_108_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_remove() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_037_000 picoseconds. - Weight::from_parts(6_303_000, 0) + // Minimum execution time: 3_520_000 picoseconds. 
+ Weight::from_parts(3_686_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:1 w:0) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1798` + // Measured: `1704` // Estimated: `6248` - // Minimum execution time: 72_578_000 
picoseconds.
- Weight::from_parts(74_232_000, 0)
+ // Minimum execution time: 63_983_000 picoseconds.
+ Weight::from_parts(66_140_000, 0)
 .saturating_add(Weight::from_parts(0, 6248))
 .saturating_add(T::DbWeight::get().reads(11))
 .saturating_add(T::DbWeight::get().writes(6))
 }
- /// Storage: Staking MinCommission (r:1 w:0)
- /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
- /// Storage: Staking Validators (r:1 w:1)
- /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
+ /// Storage: `Staking::MinCommission` (r:1 w:0)
+ /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Staking::Validators` (r:1 w:1)
+ /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 fn force_apply_min_commission() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `661`
+ // Measured: `658`
 // Estimated: `3510`
- // Minimum execution time: 13_066_000 picoseconds.
- Weight::from_parts(13_421_000, 0)
+ // Minimum execution time: 11_830_000 picoseconds.
+ Weight::from_parts(12_210_000, 0)
 .saturating_add(Weight::from_parts(0, 3510))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
 }
- /// Storage: Staking MinCommission (r:0 w:1)
- /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+ /// Storage: `Staking::MinCommission` (r:0 w:1)
+ /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 fn set_min_commission() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 3_057_000 picoseconds.
- Weight::from_parts(3_488_000, 0)
+ // Minimum execution time: 2_364_000 picoseconds.
+ Weight::from_parts(2_555_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
diff --git a/prdoc/pr_1289.prdoc b/prdoc/pr_1289.prdoc
new file mode 100644
index 000000000000..f3d8801d9d82
--- /dev/null
+++ b/prdoc/pr_1289.prdoc
@@ -0,0 +1,28 @@
+# Schema: Parity PR Documentation Schema (prdoc)
+# See doc at https://github.com/paritytech/prdoc
+
+title: Supporting paged rewards allowing all nominators to be rewarded
+
+doc:
+  - audience: Validator
+    description: |
+      Previously, only the top `MaxNominatorRewardedPerValidator` nominators by stake were eligible for staking
+      rewards. This was done to limit the computation cost of paying out rewards. This PR introduces paging to
+      reward payouts: nominators are still clipped, but at up to `MaxExposurePageSize` per page, and there can be
+      multiple pages of rewards to pay out. Validators receive commission pro-rata to the amount of reward that is
+      paid out for each page.
+
+    notes:
+      - `payout_stakers` should be called multiple times, once for each page of nominators (see the sketch below).
+      - `payout_stakers_by_page` can be used to pay out rewards for a specific page.
+      - Some old non-paged era storage items are deprecated and can be removed in a future upgrade.
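As a usage sketch (not part of the patch itself), driving a full paged payout from a pallet-staking test could look like the following. The `payout_stakers_by_page` signature and the `api_eras_stakers_page_count` helper mirror what this PR uses elsewhere; the mock items (`Staking`, `RuntimeOrigin`, `AccountId`) are assumed from the pallet's test setup.

```rust
use frame_support::assert_ok;
use sp_staking::EraIndex;

/// Pay out every reward page for `validator` in `era`.
///
/// Each call settles at most `MaxExposurePageSize` nominators, plus the
/// validator's pro-rata commission for that page.
fn payout_all_pages(caller: AccountId, validator: AccountId, era: EraIndex) {
    let pages = Staking::api_eras_stakers_page_count(era, validator.clone());
    for page in 0..pages {
        assert_ok!(Staking::payout_stakers_by_page(
            RuntimeOrigin::signed(caller.clone()),
            validator.clone(),
            era,
            page,
        ));
    }
}
```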
+ +migrations: + db: [] + + runtime: + - { pallet: "pallet-staking", description: "v14: Migration of era exposure storage items to paged exposures."} + +crates: + - name: pallet-staking + +host_functions: [] \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index cb8d7f6b1de6..127faec35629 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -485,7 +485,7 @@ impl pallet_babe::Config for Runtime { type DisabledValidators = Session; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -628,7 +628,7 @@ parameter_types! { pub const BondingDuration: sp_staking::EraIndex = 24 * 28; pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const MaxNominators: u32 = 64; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub OffchainRepeat: BlockNumber = 5; pub HistoryDepth: u32 = 84; @@ -663,7 +663,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type MaxExposurePageSize = ConstU32<256>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -686,8 +686,6 @@ impl pallet_fast_unstake::Config for Runtime { type Currency = Balances; type Staking = Staking; type MaxErasToCheckPerBlock = ConstU32<1>; - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator = MaxNominatorRewardedPerValidator; type WeightInfo = (); } @@ -1453,7 +1451,7 @@ impl pallet_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - type MaxNominators = MaxNominatorRewardedPerValidator; + type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -2392,10 +2390,14 @@ impl_runtime_apis! 
{ } } - impl pallet_staking_runtime_api::StakingApi for Runtime { + impl pallet_staking_runtime_api::StakingApi for Runtime { fn nominations_quota(balance: Balance) -> u32 { Staking::api_nominations_quota(balance) } + + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { + Staking::api_eras_stakers_page_count(era, account) + } } impl sp_consensus_babe::BabeApi for Runtime { diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index e0b23afaf66a..0003c6f9f11a 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -174,7 +174,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index ec4e6fd97270..e65f1844f88f 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -440,7 +440,7 @@ fn report_equivocation_current_session_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, validator), + Staking::eras_stakers(1, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -481,7 +481,7 @@ fn report_equivocation_current_session_works() { assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, offending_validator_id), + Staking::eras_stakers(2, &offending_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -494,7 +494,7 @@ fn report_equivocation_current_session_works() { assert_eq!(Balances::total_balance(validator), 10_000_000); assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -553,7 +553,7 @@ fn report_equivocation_old_session_works() { assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, offending_validator_id), + Staking::eras_stakers(3, &offending_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); }) diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 8618fdab19ab..53d523cf724d 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -192,7 +192,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index bf1b204e0260..bf5ae19510ce 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs 
@@ -277,7 +277,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, validator), + Staking::eras_stakers(1, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -314,7 +314,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, equivocation_validator_id), + Staking::eras_stakers(2, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -328,7 +328,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -363,7 +363,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -397,7 +397,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, equivocation_validator_id), + Staking::eras_stakers(3, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -411,7 +411,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(3, validator), + Staking::eras_stakers(3, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 9195945f6cac..751ffc07aa5d 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -237,7 +237,6 @@ parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub const BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. 
- pub const MaxNominatorRewardedPerValidator: u32 = 256; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(40); pub HistoryDepth: u32 = 84; } @@ -269,7 +268,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = (); type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type MaxExposurePageSize = ConstU32<256>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -809,7 +808,7 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), *who)), + offender: (*who, Staking::eras_stakers(active_era(), who)), reporters: vec![], }], &[Perbill::from_percent(10)], diff --git a/substrate/frame/fast-unstake/src/benchmarking.rs b/substrate/frame/fast-unstake/src/benchmarking.rs index 5ec997e8eaa2..851483e3697b 100644 --- a/substrate/frame/fast-unstake/src/benchmarking.rs +++ b/substrate/frame/fast-unstake/src/benchmarking.rs @@ -74,9 +74,9 @@ fn setup_staking(v: u32, until: EraIndex) { .collect::>(); for era in 0..=until { - let others = (0..T::MaxBackersPerValidator::get()) + let others = (0..T::Staking::max_exposure_page_size()) .map(|s| { - let who = frame_benchmarking::account::("nominator", era, s); + let who = frame_benchmarking::account::("nominator", era, s.into()); let value = ed; (who, value) }) diff --git a/substrate/frame/fast-unstake/src/lib.rs b/substrate/frame/fast-unstake/src/lib.rs index 2b99ad79a7df..153b6c2c353f 100644 --- a/substrate/frame/fast-unstake/src/lib.rs +++ b/substrate/frame/fast-unstake/src/lib.rs @@ -203,10 +203,6 @@ pub mod pallet { /// The weight information of this pallet. type WeightInfo: WeightInfo; - - /// Use only for benchmarking. - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator: Get; } /// The current "head of the queue" being unstaked. 
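For context, here is a minimal sketch of the `sp_staking::StakingInterface` addition that the fast-unstake benchmarking change above relies on. The doc text and exact placement are assumptions; the method's shape follows the mock implementation further down in this patch.

```rust
/// Index/count type for exposure pages (`sp_staking::Page` is a `u32`).
pub type Page = u32;

pub trait StakingInterface {
    // ... existing provider methods elided ...

    /// The maximum number of backers kept in a single exposure page.
    ///
    /// Replaces the removed `MaxBackersPerValidator` item in fast-unstake's
    /// `Config`; only benchmarking fixtures need it, hence the feature gate.
    #[cfg(feature = "runtime-benchmarks")]
    fn max_exposure_page_size() -> Page;
}
```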
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 6b866224ab99..df133bdfd47f 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -134,7 +134,7 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type HistoryDepth = ConstU32<84>; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; @@ -175,8 +175,6 @@ impl fast_unstake::Config for Runtime { type BatchSize = BatchSize; type WeightInfo = (); type MaxErasToCheckPerBlock = ConstU32<16>; - #[cfg(feature = "runtime-benchmarks")] - type MaxBackersPerValidator = ConstU32<128>; } type Block = frame_system::mocking::MockBlock; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 79e3069d01d7..9afcec1c797a 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -196,7 +196,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 59d73ee729ee..993d72af6d41 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -333,7 +333,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, validator), + Staking::eras_stakers(1, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -371,7 +371,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, equivocation_validator_id), + Staking::eras_stakers(2, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -385,7 +385,7 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -417,7 +417,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, validator), + Staking::eras_stakers(2, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -450,7 +450,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, equivocation_validator_id), + Staking::eras_stakers(3, &equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); @@ -464,7 +464,7 @@ fn report_equivocation_old_set_works() { assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - 
Staking::eras_stakers(3, validator), + Staking::eras_stakers(3, &validator), pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 3cbaed238354..9a7f2197a7b2 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -110,7 +110,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index d683994c28d8..24bea0b87f22 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -202,6 +202,11 @@ impl sp_staking::StakingInterface for StakingMock { fn set_current_era(_era: EraIndex) { unimplemented!("method currently not used in testing") } + + #[cfg(feature = "runtime-benchmarks")] + fn max_exposure_page_size() -> sp_staking::Page { + unimplemented!("method currently not used in testing") + } } impl frame_system::Config for Runtime { diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index c36dc70cb467..0db24e9c2441 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -124,7 +124,7 @@ impl pallet_staking::Config for Runtime { type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index c877f955fba0..1a458ec90d58 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -176,7 +176,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index a93e7ff84871..e6bb5bb18819 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -111,7 +111,7 @@ pub mod pallet { .clone() .into_iter() .map(|(o, _)| OffenceDetails:: { - offender: (o.clone(), Staking::::eras_stakers(now, o)), + offender: (o.clone(), Staking::::eras_stakers(now, &o)), reporters: vec![], }) .collect()) diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 59ab539fcf61..82da429e00a5 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ 
b/substrate/frame/root-offences/src/mock.rs @@ -179,7 +179,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index d3da12ef9a8c..47c337569a02 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -173,7 +173,7 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md new file mode 100644 index 000000000000..719aa388755f --- /dev/null +++ b/substrate/frame/staking/CHANGELOG.md @@ -0,0 +1,27 @@ +# Changelog + +All notable changes and migrations to pallet-staking will be documented in this file. + +The format is loosely based +on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a +single integer version number for the staking pallet to keep track of all storage +migrations. + +## [v14] + +### Added + +- New item `ErasStakersPaged` that keeps up to `MaxExposurePageSize` + individual nominator exposures by era, validator and page. +- New item `ErasStakersOverview` complementary to `ErasStakersPaged` which keeps + the state of the validator's own and total stake across pages. +- New item `ClaimedRewards` to support paged rewards payout. + +### Deprecated + +- `ErasStakers` and `ErasStakersClipped` are deprecated and will no longer be used for the exposures of the new era + post v14; they can be removed after 84 eras once all the exposures are stale. +- Field `claimed_rewards` in item `Ledger` is renamed + to `legacy_claimed_rewards` and can be removed after 84 eras. + +[v14]: https://github.com/paritytech/substrate/pull/13498 diff --git a/substrate/frame/staking/README.md b/substrate/frame/staking/README.md index 387b94b6a681..8c91cfcaa7fa 100644 --- a/substrate/frame/staking/README.md +++ b/substrate/frame/staking/README.md @@ -14,6 +14,7 @@ funds are rewarded under normal operation but are held at pain of _slash_ (expro be found not to be discharging its duties properly. ### Terminology + - Staking: The process of locking up funds for some time, placing them at risk of slashing (loss) in order to become a @@ -29,6 +30,7 @@ be found not to be discharging its duties properly. - Slash: The punishment of a staker by reducing its funds. ### Goals + The staking system in Substrate NPoS is designed to make the following possible: @@ -75,7 +77,7 @@ An account can become a validator candidate via the #### Nomination -A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on a set of validators to +A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on a set of validators to
Once interest in nomination is stated by an account, it takes effect at the next election round. The funds in the nominator's stash account indicate the _weight_ of its vote. Both the rewards and any punishment that a validator earns are shared between the validator and its nominators. This rule incentivizes the nominators to NOT vote for the @@ -90,10 +92,12 @@ An account can become a nominator via the The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace valid behavior_ while _punishing any misbehavior or lack of availability_. -Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. Any -account can call `payout_stakers`, which pays the reward to the validator as well as its nominators. Only the -[`Config::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This is to limit the i/o cost to -mutate storage for each nominator's account. +Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. When a +validator has more than [`Config::MaxExposurePageSize`] nominators, nominators are divided into pages with each call to +`payout_stakers` paying rewards to one page of nominators in a sequential and ascending manner. Any account can also +call `payout_stakers_by_page` to explicitly pay rewards for a given page. This means at most +[`Config::MaxExposurePageSize`] nominators are rewarded per call. This is to limit the i/o cost to mutate storage for +each nominator's account. Slashing can occur at any point in time, once misbehavior is reported. Once slashing is determined, a value is deducted from the balance of the validator and all the nominators who voted for this validator (values are deducted from the @@ -173,11 +177,13 @@ such: ```nocompile staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year ``` + This payout is used to reward stakers as defined in the next section ```nocompile remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout ``` + The remaining reward is sent to the configurable end-point [`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.RewardRemainder). diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 5f49df254ceb..746b463b8ce2 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -14,8 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false} +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } [features] default = [ "std" ] -std = [ "codec/std", "sp-api/std" ] +std = [ "codec/std", "sp-api/std", "sp-staking/std" ] diff --git a/substrate/frame/staking/runtime-api/src/lib.rs b/substrate/frame/staking/runtime-api/src/lib.rs index c669d222ec68..b04c383a077d 100644 --- a/substrate/frame/staking/runtime-api/src/lib.rs +++ b/substrate/frame/staking/runtime-api/src/lib.rs @@ -22,11 +22,15 @@ use codec::Codec; sp_api::decl_runtime_apis!
{ - pub trait StakingApi + pub trait StakingApi where Balance: Codec, + AccountId: Codec, { /// Returns the nominations quota for a nominator with a given balance. fn nominations_quota(balance: Balance) -> u32; + + /// Returns the page count of exposures for a validator in a given era. + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page; } } diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index f94d9bf4b328..05c6bc397097 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -552,10 +552,10 @@ benchmarks! { } payout_stakers_dead_controller { - let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 0 .. T::MaxExposurePageSize::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, - T::MaxNominatorRewardedPerValidator::get() as u32, + T::MaxExposurePageSize::get() as u32, true, true, RewardDestination::Controller, @@ -572,7 +572,7 @@ benchmarks! { let balance = T::Currency::free_balance(controller); ensure!(balance.is_zero(), "Controller has balance, but should be dead."); } - }: payout_stakers(RawOrigin::Signed(caller), validator, current_era) + }: payout_stakers_by_page(RawOrigin::Signed(caller), validator, current_era, 0) verify { let balance_after = T::Currency::free_balance(&validator_controller); ensure!( @@ -586,10 +586,10 @@ benchmarks! { } payout_stakers_alive_staked { - let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 0 .. T::MaxExposurePageSize::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, - T::MaxNominatorRewardedPerValidator::get() as u32, + T::MaxExposurePageSize::get() as u32, false, true, RewardDestination::Staked, @@ -687,7 +687,6 @@ benchmarks! { let l = StakingLedger::::new( stash.clone(), T::Currency::minimum_balance() - One::one(), - Default::default(), ); Ledger::::insert(&controller, l); @@ -760,7 +759,7 @@ benchmarks! 
{ let caller: T::AccountId = whitelisted_caller(); let origin = RawOrigin::Signed(caller); let calls: Vec<_> = payout_calls_arg.iter().map(|arg| - Call::::payout_stakers { validator_stash: arg.0.clone(), era: arg.1 }.encode() + Call::::payout_stakers_by_page { validator_stash: arg.0.clone(), era: arg.1, page: 0 }.encode() ).collect(); }: { for call in calls { @@ -984,7 +983,7 @@ mod tests { let (validator_stash, nominators) = create_validator_with_nominators::( n, - <::MaxNominatorRewardedPerValidator as Get<_>>::get(), + <::MaxExposurePageSize as Get<_>>::get(), false, false, RewardDestination::Staked, @@ -996,10 +995,11 @@ mod tests { let current_era = CurrentEra::::get().unwrap(); let original_free_balance = Balances::free_balance(&validator_stash); - assert_ok!(Staking::payout_stakers( + assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), validator_stash, - current_era + current_era, + 0 )); let new_free_balance = Balances::free_balance(&validator_stash); @@ -1014,7 +1014,7 @@ mod tests { let (validator_stash, _nominators) = create_validator_with_nominators::( n, - <::MaxNominatorRewardedPerValidator as Get<_>>::get(), + <::MaxExposurePageSize as Get<_>>::get(), false, false, RewardDestination::Staked, diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index cf9b4635bf55..84bb4d167dcb 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -34,9 +34,8 @@ use frame_support::{ defensive, traits::{LockableCurrency, WithdrawReasons}, - BoundedVec, }; -use sp_staking::{EraIndex, StakingAccount}; +use sp_staking::StakingAccount; use sp_std::prelude::*; use crate::{ @@ -54,7 +53,7 @@ impl StakingLedger { total: Zero::zero(), active: Zero::zero(), unlocking: Default::default(), - claimed_rewards: Default::default(), + legacy_claimed_rewards: Default::default(), controller: Some(stash), } } @@ -66,17 +65,13 @@ impl StakingLedger { /// /// Note: as the controller accounts are being deprecated, the stash account is the same as the /// controller account. - pub fn new( - stash: T::AccountId, - stake: BalanceOf, - claimed_rewards: BoundedVec, - ) -> Self { + pub fn new(stash: T::AccountId, stake: BalanceOf) -> Self { Self { stash: stash.clone(), active: stake, total: stake, unlocking: Default::default(), - claimed_rewards, + legacy_claimed_rewards: Default::default(), // controllers are deprecated and mapped 1-1 to stashes. controller: Some(stash), } @@ -240,8 +235,8 @@ pub struct StakingLedgerInspect { pub total: BalanceOf, #[codec(compact)] pub active: BalanceOf, - pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, - pub claimed_rewards: BoundedVec, + pub unlocking: frame_support::BoundedVec>, T::MaxUnlockingChunks>, + pub legacy_claimed_rewards: frame_support::BoundedVec, } #[cfg(test)] @@ -251,7 +246,7 @@ impl PartialEq> for StakingLedger { self.total == other.total && self.active == other.active && self.unlocking == other.unlocking && - self.claimed_rewards == other.claimed_rewards + self.legacy_claimed_rewards == other.legacy_claimed_rewards } } diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 227326763a9b..9e4697e845b6 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -112,11 +112,15 @@ //! The **reward and slashing** procedure is the core of the Staking pallet, attempting to _embrace //! valid behavior_ while _punishing any misbehavior or lack of availability_. //! -//! 
Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the -//! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -//! validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] -//! biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each -//! nominator's account. +//! Rewards must be claimed for each era before it gets too old by +//! [`HistoryDepth`](`Config::HistoryDepth`) using the `payout_stakers` call. Any account can call +//! `payout_stakers`, which pays the reward to the validator as well as its nominators. Only +//! [`Config::MaxExposurePageSize`] nominator rewards can be claimed in a single call. When the +//! number of nominators exceeds [`Config::MaxExposurePageSize`], then the exposed nominators are +//! stored in multiple pages, with each page containing up to +//! [`Config::MaxExposurePageSize`] nominators. To pay out all nominators, `payout_stakers` must be +//! called once for each available page. Paging exists to limit the i/o cost to mutate storage for +//! each nominator's account. //! //! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is //! determined, a value is deducted from the balance of the validator and all the nominators who @@ -224,13 +228,13 @@ //! The validator can declare an amount, named [`commission`](ValidatorPrefs::commission), that does //! not get shared with the nominators at each reward payout through its [`ValidatorPrefs`]. This //! value gets deducted from the total reward that is paid to the validator and its nominators. The -//! remaining portion is split pro rata among the validator and the top -//! [`Config::MaxNominatorRewardedPerValidator`] nominators that nominated the validator, -//! proportional to the value staked behind the validator (_i.e._ dividing the +//! remaining portion is split pro rata among the validator and the nominators that nominated the +//! validator, proportional to the value staked behind the validator (_i.e._ dividing the //! [`own`](Exposure::own) or [`others`](Exposure::others) by [`total`](Exposure::total) in -//! [`Exposure`]). Note that the pro rata division of rewards uses the total exposure behind the -//! validator, *not* just the exposure of the validator and the top -//! [`Config::MaxNominatorRewardedPerValidator`] nominators. +//! [`Exposure`]). Note that payouts are made in pages with each page capped at +//! [`Config::MaxExposurePageSize`] nominators. The distribution of nominators across +//! pages may be unsorted. The total commission is paid out proportionally across pages based on the +//! total stake of the page. //! //! All entities who receive a reward have the option to choose their reward destination through the //! 
[`Payee`] storage item (see @@ -303,7 +307,10 @@ mod pallet; use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ - traits::{ConstU32, Currency, Defensive, Get, LockIdentifier}, + defensive, defensive_assert, + traits::{ + ConstU32, Currency, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier, + }, weights::Weight, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; @@ -313,11 +320,12 @@ use sp_runtime::{ traits::{AtLeast32BitUnsigned, Convert, StaticLookup, Zero}, Perbill, Perquintill, Rounding, RuntimeDebug, Saturating, }; -pub use sp_staking::StakerStatus; use sp_staking::{ offence::{Offence, OffenceError, ReportOffence}, - EraIndex, OnStakingUpdate, SessionIndex, StakingAccount, + EraIndex, ExposurePage, OnStakingUpdate, Page, PagedExposureMetadata, SessionIndex, + StakingAccount, }; +pub use sp_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; pub use weights::WeightInfo; @@ -457,21 +465,29 @@ pub struct UnlockChunk { pub struct StakingLedger { /// The stash account whose balance is actually locked and at stake. pub stash: T::AccountId, + /// The total amount of the stash's balance that we are currently accounting for. /// It's just `active` plus all the `unlocking` balances. #[codec(compact)] pub total: BalanceOf, + /// The total amount of the stash's balance that will be at stake in any forthcoming /// rounds. #[codec(compact)] pub active: BalanceOf, + /// Any balance that is becoming free, which may eventually be transferred out of the stash /// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first /// in, first out queue where the new (higher value) eras get pushed on the back. pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, + /// List of eras for which the stakers behind a validator have claimed rewards. Only updated /// for validators. - pub claimed_rewards: BoundedVec, + /// + /// This is deprecated as of V14 in favor of `T::ClaimedRewards` and will be removed in the future. + /// Refer to issue + pub legacy_claimed_rewards: BoundedVec, + /// The controller associated with this ledger's stash. /// /// This is not stored on-chain, and is only bundled when the ledger is read from storage. @@ -507,7 +523,7 @@ impl StakingLedger { total, active: self.active, unlocking, - claimed_rewards: self.claimed_rewards, + legacy_claimed_rewards: self.legacy_claimed_rewards, controller: self.controller, } } @@ -708,32 +724,50 @@ pub struct Nominations { pub suppressed: bool, } -/// The amount of exposure (to slashing) than an individual nominator has. -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct IndividualExposure { - /// The stash account of the nominator in question. - pub who: AccountId, - /// Amount of funds exposed. - #[codec(compact)] - pub value: Balance, +/// Facade struct to encapsulate `PagedExposureMetadata` and a single page of `ExposurePage`. +/// +/// This is useful where we need to take the validator's own stake and total exposure +/// into consideration, in addition to the individual nominators backing them. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Eq)] +struct PagedExposure { + exposure_metadata: PagedExposureMetadata, + exposure_page: ExposurePage, } -/// A snapshot of the stake backing a single validator in the system.
-#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct Exposure { - /// The total balance backing this validator. - #[codec(compact)] - pub total: Balance, - /// The validator's own stash that is exposed. - #[codec(compact)] - pub own: Balance, - /// The portions of nominators stashes that are exposed. - pub others: Vec>, -} +impl + PagedExposure +{ + /// Create a new instance of `PagedExposure` from legacy clipped exposures. + pub fn from_clipped(exposure: Exposure) -> Self { + Self { + exposure_metadata: PagedExposureMetadata { + total: exposure.total, + own: exposure.own, + nominator_count: exposure.others.len() as u32, + page_count: 1, + }, + exposure_page: ExposurePage { page_total: exposure.total, others: exposure.others }, + } + } -impl Default for Exposure { - fn default() -> Self { - Self { total: Default::default(), own: Default::default(), others: vec![] } + /// Returns the total exposure of this validator across pages. + pub fn total(&self) -> Balance { + self.exposure_metadata.total + } + + /// Returns the total exposure of this validator for the current page. + pub fn page_total(&self) -> Balance { + self.exposure_page.page_total + self.exposure_metadata.own + } + + /// Returns the validator's own stake that is exposed. + pub fn own(&self) -> Balance { + self.exposure_metadata.own + } + + /// Returns the portions of nominators' stashes that are exposed in this page. + pub fn others(&self) -> &Vec> { + &self.exposure_page.others } } @@ -985,6 +1019,195 @@ where } } +/// Wrapper struct for era-related information. It is not a pure encapsulation as these storage +/// items can be accessed directly but nevertheless, it's recommended to use `EraInfo` where we +/// can and add more functions to it as needed. +pub(crate) struct EraInfo(sp_std::marker::PhantomData); +impl EraInfo { + /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy + /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be + /// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards + /// are relevant/claimable. + // Refer to the tracker issue for cleanup: #13034 + pub(crate) fn is_rewards_claimed_with_legacy_fallback( + era: EraIndex, + ledger: &StakingLedger, + validator: &T::AccountId, + page: Page, + ) -> bool { + ledger.legacy_claimed_rewards.binary_search(&era).is_ok() || + Self::is_rewards_claimed(era, validator, page) + } + + /// Check if the rewards for the given era and page index have been claimed. + /// + /// This is only used for paged rewards. Once older non-paged rewards are no longer + /// relevant, `is_rewards_claimed_with_legacy_fallback` can be removed and this function can + /// be made public. + fn is_rewards_claimed(era: EraIndex, validator: &T::AccountId, page: Page) -> bool { + ClaimedRewards::::get(era, validator).contains(&page) + } + + /// Get exposure for a validator at a given era and page. + /// + /// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the + /// validator. For older non-paged exposure, it returns the clipped exposure directly.
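To make the paging model concrete before the accessor functions below, here is a rough standalone sketch of how a flat exposure is cut into pages. The types are simplified stand-ins for the `sp_staking` definitions, not the real ones, and `into_pages` only mirrors the intended behaviour: nominators are chunked into `page_size`-sized pages while the validator's own stake stays in the metadata (the pallet counts it on page zero at payout time).

```rust
// Simplified stand-ins for the `sp_staking` types; not the real definitions.
#[derive(Debug, Clone)]
struct IndividualExposure { who: u64, value: u128 }

#[derive(Debug)]
struct Exposure { total: u128, own: u128, others: Vec<IndividualExposure> }

#[derive(Debug)]
struct ExposurePage { page_total: u128, others: Vec<IndividualExposure> }

#[derive(Debug)]
struct PagedExposureMetadata { total: u128, own: u128, nominator_count: u32, page_count: u32 }

// Split a flat exposure into `page_size`-sized chunks of nominators; the
// validator's own stake is kept only in the metadata.
fn into_pages(exposure: Exposure, page_size: usize) -> (PagedExposureMetadata, Vec<ExposurePage>) {
    let nominator_count = exposure.others.len();
    // ceil(nominator_count / page_size), matching `expected_page_count` in `set_exposure`.
    let page_count = (nominator_count + page_size - 1) / page_size;
    let pages = exposure
        .others
        .chunks(page_size)
        .map(|chunk| ExposurePage {
            page_total: chunk.iter().map(|i| i.value).sum(),
            others: chunk.to_vec(),
        })
        .collect();
    let meta = PagedExposureMetadata {
        total: exposure.total,
        own: exposure.own,
        nominator_count: nominator_count as u32,
        page_count: page_count as u32,
    };
    (meta, pages)
}

fn main() {
    // 5 nominators of 100 each with a page size of 2 produce 3 pages.
    let others: Vec<IndividualExposure> =
        (0u64..5).map(|i| IndividualExposure { who: i, value: 100 }).collect();
    let exposure = Exposure { total: 1_500, own: 1_000, others };
    let (meta, pages) = into_pages(exposure, 2);
    assert_eq!(meta.page_count, 3);
    assert_eq!(pages[0].page_total, 200);
    assert_eq!(pages[2].page_total, 100); // the last page holds the remainder
}
```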
+ pub(crate) fn get_paged_exposure( + era: EraIndex, + validator: &T::AccountId, + page: Page, + ) -> Option>> { + let overview = >::get(&era, validator); + + // return clipped exposure if page zero and paged exposure does not exist + // exists for backward compatibility and can be removed as part of #13034 + if overview.is_none() && page == 0 { + return Some(PagedExposure::from_clipped(>::get(era, validator))) + } + + // no exposure for this validator + if overview.is_none() { + return None + } + + let overview = overview.expect("checked above; qed"); + + // validator stake is added only in page zero + let validator_stake = if page == 0 { overview.own } else { Zero::zero() }; + + // since overview is present, paged exposure will always be present except when a + // validator has only own stake and no nominator stake. + let exposure_page = >::get((era, validator, page)).unwrap_or_default(); + + // build the exposure + Some(PagedExposure { + exposure_metadata: PagedExposureMetadata { own: validator_stake, ..overview }, + exposure_page, + }) + } + + /// Get full exposure of the validator at a given era. + pub(crate) fn get_full_exposure( + era: EraIndex, + validator: &T::AccountId, + ) -> Exposure> { + let overview = >::get(&era, validator); + + if overview.is_none() { + return ErasStakers::::get(era, validator) + } + + let overview = overview.expect("checked above; qed"); + + let mut others = Vec::with_capacity(overview.nominator_count as usize); + for page in 0..overview.page_count { + let nominators = >::get((era, validator, page)); + others.append(&mut nominators.map(|n| n.others).defensive_unwrap_or_default()); + } + + Exposure { total: overview.total, own: overview.own, others } + } + + /// Returns the number of pages of exposure a validator has for the given era. + /// + /// For eras where paged exposure does not exist, this returns 1 to keep backward compatibility. + pub(crate) fn get_page_count(era: EraIndex, validator: &T::AccountId) -> Page { + >::get(&era, validator) + .map(|overview| { + if overview.page_count == 0 && overview.own > Zero::zero() { + // Even though there are no nominator pages, there is still validator's own + // stake exposed which needs to be paid out in a page. + 1 + } else { + overview.page_count + } + }) + // Always returns 1 page for older non-paged exposure. + // FIXME: Can be cleaned up with issue #13034. + .unwrap_or(1) + } + + /// Returns the next page that can be claimed or `None` if nothing to claim. + pub(crate) fn get_next_claimable_page( + era: EraIndex, + validator: &T::AccountId, + ledger: &StakingLedger, + ) -> Option { + if Self::is_non_paged_exposure(era, validator) { + return match ledger.legacy_claimed_rewards.binary_search(&era) { + // already claimed + Ok(_) => None, + // Non-paged exposure is considered as a single page + Err(_) => Some(0), + } + } + + // Find next claimable page of paged exposure. + let page_count = Self::get_page_count(era, validator); + let all_claimable_pages: Vec = (0..page_count).collect(); + let claimed_pages = ClaimedRewards::::get(era, validator); + + all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p)) + } + + /// Checks if exposure is paged or not. + fn is_non_paged_exposure(era: EraIndex, validator: &T::AccountId) -> bool { + >::contains_key(&era, validator) + } + + /// Returns validator commission for this era and page. 
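The claimed-pages bookkeeping used by `get_next_claimable_page` above boils down to finding the first page index missing from the claimed set. A minimal sketch, with plain integers standing in for `EraIndex` and `Page`:

```rust
// Minimal sketch of the page-claim bookkeeping: given the number of pages and
// the already-claimed page indexes, the next payout targets the first
// unclaimed page (pages need not have been claimed in order).
fn next_claimable_page(page_count: u32, claimed: &[u32]) -> Option<u32> {
    (0..page_count).find(|p| !claimed.contains(p))
}

fn main() {
    assert_eq!(next_claimable_page(3, &[0, 2]), Some(1)); // page 1 is still open
    assert_eq!(next_claimable_page(3, &[0, 1, 2]), None); // everything claimed
    assert_eq!(next_claimable_page(0, &[]), None);        // no exposure pages at all
}
```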
+ pub(crate) fn get_validator_commission( + era: EraIndex, + validator_stash: &T::AccountId, + ) -> Perbill { + >::get(&era, validator_stash).commission + } + + /// Creates an entry to track that a validator's reward has been claimed for a given era and page. + /// Noop if already claimed. + pub(crate) fn set_rewards_as_claimed(era: EraIndex, validator: &T::AccountId, page: Page) { + let mut claimed_pages = ClaimedRewards::::get(era, validator); + + // this should never be called if the reward has already been claimed + if claimed_pages.contains(&page) { + defensive!("Trying to set an already claimed reward"); + // nevertheless don't do anything since the page already exists in claimed rewards. + return + } + + // add page to claimed entries + claimed_pages.push(page); + ClaimedRewards::::insert(era, validator, claimed_pages); + } + + /// Store exposure for elected validators at start of an era. + pub(crate) fn set_exposure( + era: EraIndex, + validator: &T::AccountId, + exposure: Exposure>, + ) { + let page_size = T::MaxExposurePageSize::get().defensive_max(1); + + let nominator_count = exposure.others.len(); + // expected page count is the number of nominators divided by the page size, rounded up. + let expected_page_count = + nominator_count.defensive_saturating_add(page_size as usize - 1) / page_size as usize; + + let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); + defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); + + >::insert(era, &validator, &exposure_metadata); + exposure_pages.iter().enumerate().for_each(|(page, paged_exposure)| { + >::insert((era, &validator, page as Page), &paged_exposure); + }); + } + + /// Store total exposure for all the elected validators in the era. + pub(crate) fn set_total_stake(era: EraIndex, total_stake: BalanceOf) { + >::insert(era, total_stake); + } +} + /// Configurations of the benchmarking of the pallet. pub trait BenchmarkingConfig { /// The maximum number of validators to use. diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 89520028b901..84b00254126f 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -14,7 +14,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -//! Storage migrations for the Staking pallet. +//! Storage migrations for the Staking pallet. The changelog for this is maintained at +//! [CHANGELOG.md](https://github.com/paritytech/substrate/blob/master/frame/staking/CHANGELOG.md). use super::*; use frame_election_provider_support::SortedListProvider; @@ -58,6 +59,49 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; +/// Migration of era exposure storage items to paged exposures.
+/// Changelog: [v14.](https://github.com/paritytech/substrate/blob/ankan/paged-rewards-rebased2/frame/staking/CHANGELOG.md#14) +pub mod v14 { + use super::*; + + pub struct MigrateToV14(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV14 { + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let on_chain = Pallet::::on_chain_storage_version(); + + if current == 14 && on_chain == 13 { + current.put::>(); + + log!(info, "v14 applied successfully."); + T::DbWeight::get().reads_writes(1, 1) + } else { + log!(warn, "v14 not applied."); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + frame_support::ensure!( + Pallet::::on_chain_storage_version() == 13, + "Required v13 before upgrading to v14." + ); + + Ok(Default::default()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + frame_support::ensure!( + Pallet::::on_chain_storage_version() == 14, + "v14 not applied" + ); + Ok(()) + } + } +} + pub mod v13 { use super::*; @@ -113,9 +157,9 @@ pub mod v12 { #[storage_alias] type HistoryDepth = StorageValue, u32, ValueQuery>; - /// Clean up `HistoryDepth` from storage. + /// Clean up `T::HistoryDepth` from storage. /// - /// We will be depending on the configurable value of `HistoryDepth` post + /// We will be depending on the configurable value of `T::HistoryDepth` post /// this release. pub struct MigrateToV12(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV12 { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index c694ce004dd6..d2afd8f26e24 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -25,8 +25,8 @@ use frame_election_provider_support::{ use frame_support::{ assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, - OnUnbalanced, OneSessionHandler, + ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced, + OneSessionHandler, }, weights::constants::RocksDbWeight, }; @@ -236,6 +236,7 @@ const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = parameter_types! 
{ pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; pub static HistoryDepth: u32 = 80; + pub static MaxExposurePageSize: u32 = 64; pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; pub static MaxWinners: u32 = 100; @@ -304,7 +305,7 @@ impl crate::pallet::pallet::Config for Test { type SessionInterface = Self; type EraPayout = ConvertCurve; type NextNewSession = Session; - type MaxNominatorRewardedPerValidator = ConstU32<64>; + type MaxExposurePageSize = MaxExposurePageSize; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -760,7 +761,7 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), *who)), + offender: (*who, Staking::eras_stakers(active_era(), who)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -778,7 +779,14 @@ pub(crate) fn make_all_reward_payment(era: EraIndex) { // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), ledger.stash, era)); + for page in 0..EraInfo::::get_page_count(era, &ledger.stash) { + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + ledger.stash, + era, + page + )); + } } } diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index ad2de1d59315..bb16fe56d51e 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -27,8 +27,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveResult, EstimateNextNewSession, Get, Imbalance, OnUnbalanced, - TryCollect, UnixTime, + Currency, Defensive, EstimateNextNewSession, Get, Imbalance, Len, OnUnbalanced, TryCollect, + UnixTime, }, weights::Weight, }; @@ -41,7 +41,7 @@ use sp_runtime::{ use sp_staking::{ currency_to_vote::CurrencyToVote, offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, - EraIndex, SessionIndex, Stake, + EraIndex, Page, SessionIndex, Stake, StakingAccount::{self, Controller, Stash}, StakingInterface, }; @@ -49,9 +49,9 @@ use sp_std::prelude::*; use crate::{ election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, - BalanceOf, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, MaxNominationsOf, - MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, - SessionInterface, StakingLedger, ValidatorPrefs, + BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, + MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; use super::pallet::*; @@ -158,12 +158,31 @@ impl Pallet { pub(super) fn do_payout_stakers( validator_stash: T::AccountId, era: EraIndex, + ) -> DispatchResultWithPostInfo { + let controller = Self::bonded(&validator_stash).ok_or_else(|| { + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + let ledger = >::get(&controller).ok_or(Error::::NotController)?; + let page = EraInfo::::get_next_claimable_page(era, &validator_stash, &ledger) + .ok_or_else(|| { + Error::::AlreadyClaimed 
+ .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + Self::do_payout_stakers_by_page(validator_stash, era, page) + } + + pub(super) fn do_payout_stakers_by_page( + validator_stash: T::AccountId, + era: EraIndex, + page: Page, ) -> DispatchResultWithPostInfo { // Validate input data let current_era = CurrentEra::::get().ok_or_else(|| { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) })?; + let history_depth = T::HistoryDepth::get(); ensure!( era <= current_era && era >= current_era.saturating_sub(history_depth), @@ -171,8 +190,13 @@ impl Pallet { .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) ); + ensure!( + page < EraInfo::::get_page_count(era, &validator_stash), + Error::::InvalidPage.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); + // Note: if era has no reward to be claimed, era may be future. better not to update - // `ledger.claimed_rewards` in this case. + // `ledger.legacy_claimed_rewards` in this case. let era_payout = >::get(&era).ok_or_else(|| { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) @@ -186,31 +210,29 @@ impl Pallet { Err(Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) } })?; - let stash = ledger.stash.clone(); + // clean up older claimed rewards ledger - .claimed_rewards + .legacy_claimed_rewards .retain(|&x| x >= current_era.saturating_sub(history_depth)); + ledger.clone().update()?; + + let stash = ledger.stash.clone(); - match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => - return Err(Error::::AlreadyClaimed - .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))), - Err(pos) => ledger - .claimed_rewards - .try_insert(pos, era) - // Since we retain era entries in `claimed_rewards` only upto - // `HistoryDepth`, following bound is always expected to be - // satisfied. - .defensive_map_err(|_| Error::::BoundNotMet)?, + if EraInfo::::is_rewards_claimed_with_legacy_fallback(era, &ledger, &stash, page) { + return Err(Error::::AlreadyClaimed + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + } else { + EraInfo::::set_rewards_as_claimed(era, &stash, page); } - let exposure = >::get(&era, &stash); + let exposure = EraInfo::::get_paged_exposure(era, &stash, page).ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; // Input data seems good, no errors allowed after this point - ledger.update()?; - // Get Era reward points. It has TOTAL and INDIVIDUAL // Find the fraction of the era reward that belongs to the validator // Take that fraction of the eras rewards to split to nominator and validator @@ -236,15 +258,17 @@ impl Pallet { // This is how much validator + nominators are entitled to. let validator_total_payout = validator_total_reward_part * era_payout; - let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); - // Validator first gets a cut off the top. 
- let validator_commission = validator_prefs.commission; - let validator_commission_payout = validator_commission * validator_total_payout; + let validator_commission = EraInfo::::get_validator_commission(era, &ledger.stash); + // total commission validator takes across all nominator pages + let validator_total_commission_payout = validator_commission * validator_total_payout; - let validator_leftover_payout = validator_total_payout - validator_commission_payout; + let validator_leftover_payout = validator_total_payout - validator_total_commission_payout; // Now let's calculate how this is split to the validator. - let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); + let validator_exposure_part = Perbill::from_rational(exposure.own(), exposure.total()); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + let page_stake_part = Perbill::from_rational(exposure.page_total(), exposure.total()); + // validator commission is paid out in fraction across pages proportional to the page stake. + let validator_commission_payout = page_stake_part * validator_total_commission_payout; Self::deposit_event(Event::::PayoutStarted { era_index: era, @@ -267,8 +291,8 @@ impl Pallet { // Lets now calculate how this is split to the nominators. // Reward only the clipped exposures. Note this is not necessarily sorted. - for nominator in exposure.others.iter() { - let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total); + for nominator in exposure.others().iter() { + let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total()); let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; @@ -287,7 +311,8 @@ impl Pallet { } T::Reward::on_unbalanced(total_imbalance); - debug_assert!(nominator_payout_count <= T::MaxNominatorRewardedPerValidator::get()); + debug_assert!(nominator_payout_count <= T::MaxExposurePageSize::get()); + Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) } @@ -306,6 +331,11 @@ impl Pallet { stash: &T::AccountId, amount: BalanceOf, ) -> Option<(PositiveImbalanceOf, RewardDestination)> { + // noop if amount is zero + if amount.is_zero() { + return None + } + let dest = Self::payee(StakingAccount::Stash(stash.clone())); let maybe_imbalance = match dest { RewardDestination::Controller => Self::bonded(stash) @@ -587,31 +617,24 @@ impl Pallet { >, new_planned_era: EraIndex, ) -> BoundedVec> { - let elected_stashes: BoundedVec<_, MaxWinnersOf> = exposures - .iter() - .cloned() - .map(|(x, _)| x) - .collect::>() - .try_into() - .expect("since we only map through exposures, size of elected_stashes is always same as exposures; qed"); - - // Populate stakers, exposures, and the snapshot of validator prefs. + // Populate elected stash, stakers, exposures, and the snapshot of validator prefs. 
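A worked example of the per-page payout math above, with made-up numbers and the `Perbill` arithmetic approximated by `f64` for readability:

```rust
// Made-up numbers; Perbill arithmetic approximated with f64 for readability.
fn main() {
    let validator_total_payout = 1_000.0; // this validator's share of the era payout
    let commission = 0.10;                // ValidatorPrefs::commission
    let total = 2_000.0;                  // total exposure across all pages
    let own = 500.0;                      // validator's own stake, exposed on page 0 only

    // Commission is computed once, off the top.
    let total_commission = commission * validator_total_payout; // 100
    let leftover = validator_total_payout - total_commission;   // 900, split pro rata by stake

    // The validator's stake-based share of the leftover is paid with page 0,
    // since `own` is only included in page 0's exposure.
    let validator_staking_payout = (own / total) * leftover; // 225

    // Page 0 carries own (500) + 700 of nominator stake; page 1 carries 800.
    // Each page receives the slice of commission matching its share of stake,
    // so the slices sum back to the full commission (60 + 40 = 100).
    for (page, page_total) in [(0, 1_200.0), (1, 800.0)] {
        let page_commission = (page_total / total) * total_commission;
        println!("page {page}: commission slice = {page_commission}");
    }
    println!("validator staking payout: {validator_staking_payout}");
}
```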
let mut total_stake: BalanceOf = Zero::zero(); + let mut elected_stashes = Vec::with_capacity(exposures.len()); + exposures.into_iter().for_each(|(stash, exposure)| { + // build elected stash + elected_stashes.push(stash.clone()); + // accumulate total stake total_stake = total_stake.saturating_add(exposure.total); - >::insert(new_planned_era, &stash, &exposure); - - let mut exposure_clipped = exposure; - let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; - if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); - exposure_clipped.others.truncate(clipped_max_len); - } - >::insert(&new_planned_era, &stash, exposure_clipped); + // store staker exposure for this era + EraInfo::::set_exposure(new_planned_era, &stash, exposure); }); - // Insert current era staking information - >::insert(&new_planned_era, total_stake); + let elected_stashes: BoundedVec<_, MaxWinnersOf> = elected_stashes + .try_into() + .expect("elected_stashes.len() always equal to exposures.len(); qed"); + + EraInfo::::set_total_stake(new_planned_era, total_stake); // Collect the pref of all winners. for stash in &elected_stashes { @@ -692,12 +715,21 @@ impl Pallet { /// Clear all era information for given era. pub(crate) fn clear_era_information(era_index: EraIndex) { + // FIXME: We can possibly set a reasonable limit since we do this only once per era and + // clean up state across multiple blocks. let mut cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era_index, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix((era_index,), u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era_index, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + >::remove(era_index); >::remove(era_index); >::remove(era_index); @@ -1036,6 +1068,18 @@ impl Pallet { DispatchClass::Mandatory, ); } + + /// Returns full exposure of a validator for a given era. + /// + /// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14. + /// Since this function is used in the codebase at various places, we kept it as a custom getter + /// that takes care of getting the full exposure of the validator in a backward compatible way. 
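On the caller side, claiming everything for a validator follows the pattern of `make_all_reward_payment` in mock.rs: query the page count, then submit one payout call per page. A sketch against a stand-in trait (defined here only for illustration; it is not a real pallet interface):

```rust
// `Payouts` is a stand-in defined for this sketch; it is not a pallet trait.
trait Payouts {
    fn page_count(&self, era: u32, validator: u64) -> u32;
    fn payout_stakers_by_page(&mut self, era: u32, validator: u64, page: u32)
        -> Result<(), &'static str>;
}

// Claim every page for `validator` in `era`, in ascending page order.
fn pay_all_pages<P: Payouts>(p: &mut P, era: u32, validator: u64) -> Result<(), &'static str> {
    for page in 0..p.page_count(era, validator) {
        p.payout_stakers_by_page(era, validator, page)?;
    }
    Ok(())
}

// A dummy implementation just to exercise the loop.
struct Dummy { pages: u32, paid: Vec<u32> }

impl Payouts for Dummy {
    fn page_count(&self, _era: u32, _validator: u64) -> u32 { self.pages }
    fn payout_stakers_by_page(&mut self, _era: u32, _validator: u64, page: u32)
        -> Result<(), &'static str> {
        self.paid.push(page);
        Ok(())
    }
}

fn main() {
    let mut staking = Dummy { pages: 3, paid: vec![] };
    pay_all_pages(&mut staking, 0, 11).unwrap();
    assert_eq!(staking.paid, vec![0, 1, 2]);
}
```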
+ pub fn eras_stakers( + era: EraIndex, + account: &T::AccountId, + ) -> Exposure> { + EraInfo::::get_full_exposure(era, account) + } } impl Pallet { @@ -1045,6 +1089,17 @@ impl Pallet { pub fn api_nominations_quota(balance: BalanceOf) -> u32 { T::NominationsQuota::get_quota(balance) } + + pub fn api_eras_stakers( + era: EraIndex, + account: T::AccountId, + ) -> Exposure> { + Self::eras_stakers(era, &account) + } + + pub fn api_eras_stakers_page_count(era: EraIndex, account: T::AccountId) -> Page { + EraInfo::::get_page_count(era, &account) + } } impl ElectionDataProvider for Pallet { @@ -1129,10 +1184,7 @@ impl ElectionDataProvider for Pallet { panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") }); >::insert(voter.clone(), voter.clone()); - >::insert( - voter.clone(), - StakingLedger::::new(voter.clone(), stake, Default::default()), - ); + >::insert(voter.clone(), StakingLedger::::new(voter.clone(), stake)); Self::do_add_nominator(&voter, Nominations { targets, submitted_in: 0, suppressed: false }); } @@ -1141,10 +1193,7 @@ impl ElectionDataProvider for Pallet { fn add_target(target: T::AccountId) { let stake = MinValidatorBond::::get() * 100u32.into(); >::insert(target.clone(), target.clone()); - >::insert( - target.clone(), - StakingLedger::::new(target.clone(), stake, Default::default()), - ); + >::insert(target.clone(), StakingLedger::::new(target.clone(), stake)); Self::do_add_validator( &target, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, @@ -1176,10 +1225,7 @@ impl ElectionDataProvider for Pallet { .and_then(|w| >::try_from(w).ok()) .unwrap_or_else(|| MinNominatorBond::::get() * 100u32.into()); >::insert(v.clone(), v.clone()); - >::insert( - v.clone(), - StakingLedger::::new(v.clone(), stake, Default::default()), - ); + >::insert(v.clone(), StakingLedger::::new(v.clone(), stake)); Self::do_add_validator( &v, ValidatorPrefs { commission: Perbill::zero(), blocked: false }, @@ -1191,10 +1237,7 @@ impl ElectionDataProvider for Pallet { panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") }); >::insert(v.clone(), v.clone()); - >::insert( - v.clone(), - StakingLedger::::new(v.clone(), stake, Default::default()), - ); + >::insert(v.clone(), StakingLedger::::new(v.clone(), stake)); Self::do_add_nominator( &v, Nominations { targets: t, submitted_in: 0, suppressed: false }, @@ -1622,31 +1665,12 @@ impl StakingInterface for Pallet { MinValidatorBond::::get() } - fn desired_validator_count() -> u32 { - ValidatorCount::::get() - } - - fn election_ongoing() -> bool { - T::ElectionProvider::ongoing() - } - - fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { - let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); - Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) - } - fn stash_by_ctrl(controller: &Self::AccountId) -> Result { Self::ledger(Controller(controller.clone())) .map(|l| l.stash) .map_err(|e| e.into()) } - fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { - ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { - validator == *who || exposures.others.iter().any(|i| i.who == *who) - }) - } - fn bonding_duration() -> EraIndex { T::BondingDuration::get() } @@ -1707,6 +1731,24 @@ impl StakingInterface for Pallet { Self::nominate(RawOrigin::Signed(ctrl).into(), targets) } + fn desired_validator_count() -> u32 { + ValidatorCount::::get() + } + + fn election_ongoing() -> bool { + 
T::ElectionProvider::ongoing() + } + + fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { + let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); + Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) + } + + fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { + validator == *who || exposures.others.iter().any(|i| i.who == *who) + }) + } fn status( who: &Self::AccountId, ) -> Result, DispatchError> { @@ -1746,12 +1788,16 @@ impl StakingInterface for Pallet { .map(|(who, value)| IndividualExposure { who: who.clone(), value: value.clone() }) .collect::>(); let exposure = Exposure { total: Default::default(), own: Default::default(), others }; - >::insert(¤t_era, &stash, &exposure); + EraInfo::::set_exposure(*current_era, stash, exposure); } fn set_current_era(era: EraIndex) { CurrentEra::::put(era); } + + fn max_exposure_page_size() -> Page { + T::MaxExposurePageSize::get() + } } } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index f084299be8e1..18ad3e4a6cf1 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -24,8 +24,8 @@ use frame_election_provider_support::{ use frame_support::{ pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, - EstimateNextNewSession, Get, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, + Currency, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, + LockableCurrency, OnUnbalanced, UnixTime, }, weights::Weight, BoundedVec, @@ -35,8 +35,9 @@ use sp_runtime::{ traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, ArithmeticError, Perbill, Percent, }; + use sp_staking::{ - EraIndex, SessionIndex, + EraIndex, Page, SessionIndex, StakingAccount::{self, Controller, Stash}, }; use sp_std::prelude::*; @@ -47,9 +48,9 @@ pub use impls::*; use crate::{ slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, Forcing, MaxNominationsOf, NegativeImbalanceOf, Nominations, - NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, - UnappliedSlash, UnlockChunk, ValidatorPrefs, + EraRewardPoints, Exposure, ExposurePage, Forcing, MaxNominationsOf, NegativeImbalanceOf, + Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, + StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, }; // The speculative number of spans are used as an input of the weight annotation of @@ -61,12 +62,12 @@ pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32; pub mod pallet { use frame_election_provider_support::ElectionDataProvider; - use crate::BenchmarkingConfig; + use crate::{BenchmarkingConfig, PagedExposureMetadata}; use super::*; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(13); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -138,8 +139,8 @@ pub mod pallet { /// Following information is kept for eras in `[current_era - /// HistoryDepth, current_era]`: `ErasStakers`, `ErasStakersClipped`, /// `ErasValidatorPrefs`, `ErasValidatorReward`, `ErasRewardPoints`, - /// `ErasTotalStake`, `ErasStartSessionIndex`, - /// `StakingLedger.claimed_rewards`. 
+ /// `ErasTotalStake`, `ErasStartSessionIndex`, `ClaimedRewards`, `ErasStakersPaged`, + /// `ErasStakersOverview`. /// /// Must be more than the number of eras delayed by session. /// I.e. active era must always be in history. I.e. `active_era > @@ -149,7 +150,7 @@ pub mod pallet { /// this should be set to same value or greater as in storage. /// /// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec` - /// item `StakingLedger.claimed_rewards`. Setting this value lower than + /// item `StakingLedger.legacy_claimed_rewards`. Setting this value lower than /// the existing value can lead to inconsistencies in the /// `StakingLedger` and will need to be handled properly in a migration. /// The test `reducing_history_depth_abrupt` shows this effect. @@ -202,12 +203,19 @@ pub mod pallet { /// guess. type NextNewSession: EstimateNextNewSession>; - /// The maximum number of nominators rewarded for each validator. + /// The maximum size of each `T::ExposurePage`. + /// + /// An `ExposurePage` is weakly bounded to a maximum of `MaxExposurePageSize` + /// nominators. /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can - /// claim their reward. This used to limit the i/o cost for the nominator payout. + /// For older non-paged exposure, a reward payout was restricted to the top + /// `MaxExposurePageSize` nominators. This is to limit the i/o cost for the + /// nominator payout. + /// + /// Note: `MaxExposurePageSize` is used to bound `ClaimedRewards` and is unsafe to reduce + /// without handling it in a migration. #[pallet::constant] - type MaxNominatorRewardedPerValidator: Get; + type MaxExposurePageSize: Get; /// The fraction of the validator set that is safe to be offending. /// After the threshold is reached a new era will be forced. @@ -390,7 +398,7 @@ pub mod pallet { #[pallet::getter(fn active_era)] pub type ActiveEra = StorageValue<_, ActiveEraInfo>; - /// The session index at which the era start for the last `HISTORY_DEPTH` eras. + /// The session index at which the era started for the last [`Config::HistoryDepth`] eras. /// /// Note: This tracks the starting session (i.e. the session index when the era starts being active) /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. @@ -402,10 +410,11 @@ pub mod pallet { /// /// This is keyed first by the era index to allow bulk deletion and then the stash account. /// - /// Is it removed after `HISTORY_DEPTH` eras. + /// It is removed after [`Config::HistoryDepth`] eras. /// If stakers hasn't been set or has been removed then empty exposure is returned. + /// + /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. #[pallet::storage] - #[pallet::getter(fn eras_stakers)] #[pallet::unbounded] pub type ErasStakers = StorageDoubleMap< _, @@ -417,17 +426,45 @@ pub mod pallet { ValueQuery, >; + /// Summary of validator exposure at a given era. + /// + /// This contains the total stake in support of the validator and their own stake. In addition, + /// it can also be used to get the number of nominators backing this validator and the number of + /// exposure pages they are divided into. The page count is useful to determine the number of + /// pages of rewards that need to be claimed. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// Should only be accessed through `EraInfo`. + /// + /// It is removed after [`Config::HistoryDepth`] eras.
+ /// If stakers hasn't been set or has been removed then empty overview is returned. + #[pallet::storage] + pub type ErasStakersOverview = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + PagedExposureMetadata>, + OptionQuery, + >; + /// Clipped Exposure of validator at era. /// + /// Note: This is deprecated, should be used as read-only and will be removed in the future. + /// New `Exposure`s are stored in a paged manner in `ErasStakersPaged` instead. + /// /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the - /// `T::MaxNominatorRewardedPerValidator` biggest stakers. + /// `T::MaxExposurePageSize` biggest stakers. /// (Note: the fields `total` and `own` of the exposure remain unchanged). /// This is used to limit the i/o cost for the nominator payout. /// /// This is keyed first by the era index to allow bulk deletion and then the stash account. /// - /// Is it removed after `HISTORY_DEPTH` eras. + /// It is removed after [`Config::HistoryDepth`] eras. /// If stakers hasn't been set or has been removed then empty exposure is returned. + /// + /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. #[pallet::storage] #[pallet::unbounded] #[pallet::getter(fn eras_stakers_clipped)] @@ -441,11 +478,49 @@ pub mod pallet { ValueQuery, >; + /// Paginated exposure of a validator at given era. + /// + /// This is keyed first by the era index to allow bulk deletion, then stash account and finally + /// the page. Should only be accessed through `EraInfo`. + /// + /// This is cleared after [`Config::HistoryDepth`] eras. + #[pallet::storage] + #[pallet::unbounded] + pub type ErasStakersPaged = StorageNMap< + _, + ( + NMapKey, + NMapKey, + NMapKey, + ), + ExposurePage>, + OptionQuery, + >; + + /// History of claimed paged rewards by era and validator. + /// + /// This is keyed by era and validator stash which maps to the set of page indexes which have + /// been claimed. + /// + /// It is removed after [`Config::HistoryDepth`] eras. + #[pallet::storage] + #[pallet::getter(fn claimed_rewards)] + #[pallet::unbounded] + pub type ClaimedRewards = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + Vec, + ValueQuery, + >; + /// Similar to `ErasStakers`, this holds the preferences of validators. /// /// This is keyed first by the era index to allow bulk deletion and then the stash account. /// - /// Is it removed after `HISTORY_DEPTH` eras. + /// It is removed after [`Config::HistoryDepth`] eras. // If prefs hasn't been set or has been removed then 0 commission is returned. #[pallet::storage] #[pallet::getter(fn eras_validator_prefs)] @@ -459,14 +534,14 @@ pub mod pallet { ValueQuery, >; - /// The total validator era payout for the last `HISTORY_DEPTH` eras. + /// The total validator era payout for the last [`Config::HistoryDepth`] eras. /// /// Eras that haven't finished yet or have been removed don't have a reward. #[pallet::storage] #[pallet::getter(fn eras_validator_reward)] pub type ErasValidatorReward = StorageMap<_, Twox64Concat, EraIndex, BalanceOf>; - /// Rewards for the last `HISTORY_DEPTH` eras. + /// Rewards for the last [`Config::HistoryDepth`] eras. /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] #[pallet::unbounded] #[pallet::getter(fn eras_reward_points)] pub type ErasRewardPoints = StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>; - /// The total amount staked for the last `HISTORY_DEPTH` eras.
+ /// The total amount staked for the last [`Config::HistoryDepth`] eras. /// If total hasn't been set or has been removed then 0 stake is returned. #[pallet::storage] #[pallet::getter(fn eras_total_stake)] @@ -743,6 +818,8 @@ pub mod pallet { NotSortedAndUnique, /// Rewards for this era have already been claimed for this validator. AlreadyClaimed, + /// No nominators exist on this page. + InvalidPage, /// Incorrect previous history depth input provided. IncorrectHistoryDepth, /// Incorrect number of slashing spans provided. @@ -854,23 +931,10 @@ pub mod pallet { frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; - let current_era = CurrentEra::::get().unwrap_or(0); - let history_depth = T::HistoryDepth::get(); - let last_reward_era = current_era.saturating_sub(history_depth); - let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); - let ledger = StakingLedger::::new( - stash.clone(), - value, - (last_reward_era..current_era) - .try_collect() - // Since last_reward_era is calculated as `current_era - - // HistoryDepth`, following bound is always expected to be - // satisfied. - .defensive_map_err(|_| Error::::BoundNotMet)?, - ); + let ledger = StakingLedger::::new(stash.clone(), value); // You're auto-bonded forever, here. We might improve this by only bonding when // you actually validate/nominate and remove once you unbond __everything__. @@ -1463,21 +1527,21 @@ pub mod pallet { Ok(()) } - /// Pay out all the stakers behind a single validator for a single era. + /// Pay out the next page of the stakers behind a validator for the given era. /// - /// - `validator_stash` is the stash account of the validator. Their nominators, up to - /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. + /// - `validator_stash` is the stash account of the validator. /// - `era` may be any era between `[current_era - history_depth; current_era]`. /// /// The origin of this call must be _Signed_. Any account can call this function, even if /// it is not one of the stakers. /// - /// ## Complexity - /// - At most O(MaxNominatorRewardedPerValidator). + /// The reward payout could be paged in case there are too many nominators backing the + /// `validator_stash`. This call will pay out unpaid pages in ascending order. To claim a + /// specific page, use `payout_stakers_by_page`. + /// + /// If all pages are claimed, it returns an error `InvalidPage`. #[pallet::call_index(18)] - #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked( - T::MaxNominatorRewardedPerValidator::get() - ))] + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxExposurePageSize::get()))] pub fn payout_stakers( origin: OriginFor, validator_stash: T::AccountId, @@ -1779,6 +1843,35 @@ pub mod pallet { MinCommission::::put(new); Ok(()) } + + /// Pay out a page of the stakers behind a validator for the given era and page. + /// + /// - `validator_stash` is the stash account of the validator. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. + /// - `page` is the page index of nominators to pay out with value between 0 and + /// `num_nominators / T::MaxExposurePageSize`. + /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers.
+		/// Pay out a page of the stakers behind a validator for the given era and page.
+		///
+		/// - `validator_stash` is the stash account of the validator.
+		/// - `era` may be any era between `[current_era - history_depth; current_era]`.
+		/// - `page` is the page index of nominators to pay out with value between 0 and
+		///   `num_nominators / T::MaxExposurePageSize`.
+		///
+		/// The origin of this call must be _Signed_. Any account can call this function, even if
+		/// it is not one of the stakers.
+		///
+		/// If a validator has more than [`Config::MaxExposurePageSize`] nominators backing
+		/// them, then the list of nominators is paged, with each page being capped at
+		/// [`Config::MaxExposurePageSize`]. If a validator has more than one page of nominators,
+		/// the call needs to be made for each page separately in order for all the nominators
+		/// backing a validator to receive the reward. The nominators are not sorted across
+		/// pages, so it should not be assumed that the highest staker is on the first page or
+		/// the lowest on the last. If rewards are not claimed within [`Config::HistoryDepth`]
+		/// eras, they are lost.
+		#[pallet::call_index(26)]
+		#[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxExposurePageSize::get()))]
+		pub fn payout_stakers_by_page(
+			origin: OriginFor<T>,
+			validator_stash: T::AccountId,
+			era: EraIndex,
+			page: Page,
+		) -> DispatchResultWithPostInfo {
+			ensure_signed(origin)?;
+			Self::do_payout_stakers_by_page(validator_stash, era, page)
+		}
 	}
 }
 
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index cb620f89f12c..ee6f67adf14c 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -29,6 +29,7 @@ use frame_support::{
 	pallet_prelude::*,
 	traits::{Currency, Get, ReservableCurrency},
 };
+
 use mock::*;
 use pallet_balances::Error as BalancesError;
 use sp_runtime::{
@@ -158,7 +159,7 @@ fn basic_setup_works() {
 			total: 1000,
 			active: 1000,
 			unlocking: Default::default(),
-			claimed_rewards: bounded_vec![],
+			legacy_claimed_rewards: bounded_vec![],
 		}
 	);
 	// Account 21 controls its own stash, which is 200 * balance_factor units
@@ -169,7 +170,7 @@ fn basic_setup_works() {
 			total: 1000,
 			active: 1000,
 			unlocking: Default::default(),
-			claimed_rewards: bounded_vec![],
+			legacy_claimed_rewards: bounded_vec![],
 		}
 	);
 	// Account 1 does not control any stash
@@ -192,13 +193,13 @@ fn basic_setup_works() {
 			total: 500,
 			active: 500,
 			unlocking: Default::default(),
-			claimed_rewards: bounded_vec![],
+			legacy_claimed_rewards: bounded_vec![],
 		}
 	);
 	assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]);
 
 	assert_eq!(
-		Staking::eras_stakers(active_era(), 11),
+		Staking::eras_stakers(active_era(), &11),
 		Exposure {
 			total: 1125,
 			own: 1000,
@@ -206,7 +207,7 @@ fn basic_setup_works() {
 		},
 	);
 	assert_eq!(
-		Staking::eras_stakers(active_era(), 21),
+		Staking::eras_stakers(active_era(), &21),
 		Exposure {
 			total: 1375,
 			own: 1000,
 		},
 	);
@@ -467,7 +468,7 @@ fn staking_should_work() {
 			total: 1500,
 			active: 1500,
 			unlocking: Default::default(),
-			claimed_rewards: bounded_vec![0],
+			legacy_claimed_rewards: bounded_vec![],
 		}
 	);
 	// e.g. it cannot reserve more than 500 that it has free from the total 2000
@@ -522,7 +523,7 @@ fn less_than_needed_candidates_works() {
 
 		// But the exposure is updated in a simple way. No external votes exist.
 		// This is purely self-vote.
- assert!(ErasStakers::::iter_prefix_values(active_era()) + assert!(ErasStakersPaged::::iter_prefix_values((active_era(),)) .all(|exposure| exposure.others.is_empty())); }); } @@ -640,9 +641,9 @@ fn nominating_and_rewards_should_work() { assert_eq!(Balances::total_balance(&21), initial_balance_21 + total_payout_0 / 2); initial_balance_21 = Balances::total_balance(&21); - assert_eq!(ErasStakers::::iter_prefix_values(active_era()).count(), 2); + assert_eq!(ErasStakersPaged::::iter_prefix_values((active_era(),)).count(), 2); assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000 + 800, own: 1000, @@ -653,7 +654,7 @@ fn nominating_and_rewards_should_work() { }, ); assert_eq!( - Staking::eras_stakers(active_era(), 21), + Staking::eras_stakers(active_era(), &21), Exposure { total: 1000 + 1200, own: 1000, @@ -713,7 +714,7 @@ fn nominators_also_get_slashed_pro_rata() { ExtBuilder::default().build_and_execute(|| { mock::start_active_era(1); let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), 11); + let initial_exposure = Staking::eras_stakers(active_era(), &11); // 101 is a nominator for 11 assert_eq!(initial_exposure.others.first().unwrap().who, 101); @@ -981,7 +982,7 @@ fn cannot_transfer_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller) is totally staked - assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( Balances::transfer_allow_death(RuntimeOrigin::signed(11), 21, 1), @@ -1006,7 +1007,7 @@ fn cannot_transfer_staked_balance_2() { // Confirm account 21 has some free balance assert_eq!(Balances::free_balance(21), 2000); // Confirm account 21 (via controller) is totally staked - assert_eq!(Staking::eras_stakers(active_era(), 21).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 1000); // Confirm account 21 can transfer at most 1000 assert_noop!( Balances::transfer_allow_death(RuntimeOrigin::signed(21), 21, 1001), @@ -1025,7 +1026,7 @@ fn cannot_reserve_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(active_era(), 11).own, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Confirm account 11 cannot reserve as a result assert_noop!(Balances::reserve(&11, 1), BalancesError::::LiquidityRestrictions); @@ -1054,7 +1055,7 @@ fn reward_destination_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1077,10 +1078,13 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0], + legacy_claimed_rewards: bounded_vec![], } ); + // (era 0, page 0) is claimed + assert_eq!(Staking::claimed_rewards(0, &11), vec![0]); + // Change RewardDestination to Stash >::insert(&11, RewardDestination::Stash); @@ -1095,6 +1099,8 @@ fn reward_destination_works() { assert_eq!(Staking::payee(11.into()), RewardDestination::Stash); // Check that reward went to the stash account assert_eq!(Balances::free_balance(11), 1000 + total_payout_0 + total_payout_1); + // Record 
this value + let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1; // Check that amount at stake is NOT increased assert_eq!( Staking::ledger(11.into()).unwrap(), @@ -1103,10 +1109,13 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0, 1], + legacy_claimed_rewards: bounded_vec![], } ); + // (era 1, page 0) is claimed + assert_eq!(Staking::claimed_rewards(1, &11), vec![0]); + // Change RewardDestination to Controller >::insert(&11, RewardDestination::Controller); @@ -1123,7 +1132,7 @@ fn reward_destination_works() { // Check that RewardDestination is Controller assert_eq!(Staking::payee(11.into()), RewardDestination::Controller); // Check that reward went to the controller account - assert_eq!(Balances::free_balance(11), 23150 + total_payout_2); + assert_eq!(Balances::free_balance(11), recorded_stash_balance + total_payout_2); // Check that amount at stake is NOT increased assert_eq!( Staking::ledger(11.into()).unwrap(), @@ -1132,9 +1141,12 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: bounded_vec![0, 1, 2], + legacy_claimed_rewards: bounded_vec![], } ); + + // (era 2, page 0) is claimed + assert_eq!(Staking::claimed_rewards(2, &11), vec![0]); }); } @@ -1159,7 +1171,7 @@ fn validator_payment_prefs_work() { // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); - let exposure_1 = Staking::eras_stakers(active_era(), 11); + let exposure_1 = Staking::eras_stakers(active_era(), &11); Pallet::::reward_by_ids(vec![(11, 1)]); mock::start_active_era(2); @@ -1192,7 +1204,7 @@ fn bond_extra_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1209,7 +1221,7 @@ fn bond_extra_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1223,7 +1235,7 @@ fn bond_extra_works() { total: 1000000, active: 1000000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); }); @@ -1261,11 +1273,11 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000, own: 1000, others: vec![] } ); @@ -1279,12 +1291,12 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); // Exposure is a snapshot! only updated after the next era update. assert_ne!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } ); @@ -1300,12 +1312,12 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); // Exposure is now updated. 
assert_eq!( - Staking::eras_stakers(active_era(), 11), + Staking::eras_stakers(active_era(), &11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } ); @@ -1318,7 +1330,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], }, ); @@ -1331,7 +1343,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], }, ); @@ -1347,7 +1359,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], }, ); @@ -1363,7 +1375,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 100, active: 100, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], }, ); }) @@ -1466,7 +1478,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1485,7 +1497,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1498,7 +1510,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1511,7 +1523,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1524,7 +1536,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1537,7 +1549,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1552,7 +1564,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1565,7 +1577,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); }) @@ -1592,7 +1604,7 @@ fn rebond_is_fifo() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1607,7 +1619,7 @@ fn rebond_is_fifo() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 2 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1625,7 +1637,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, ], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1644,7 +1656,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 300, era: 3 + 3 }, UnlockChunk { value: 200, era: 4 + 3 }, ], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ 
-1660,7 +1672,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 100, era: 3 + 3 }, ], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); }) @@ -1689,7 +1701,7 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1702,7 +1714,7 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 200, unlocking: bounded_vec![UnlockChunk { value: 800, era: 1 + 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); // Event emitted should be correct @@ -1717,7 +1729,7 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); // Event emitted should be correct, only 800 @@ -1737,15 +1749,15 @@ fn reward_to_stake_works() { // Confirm account 10 and 20 are validators assert!(>::contains_key(&11) && >::contains_key(&21)); - assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); - assert_eq!(Staking::eras_stakers(active_era(), 21).total, 2000); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); // Give the man some money. let _ = Balances::make_free_balance_be(&10, 1000); let _ = Balances::make_free_balance_be(&20, 1000); // Bypass logic and change current exposure - ErasStakers::::insert(0, 21, Exposure { total: 69, own: 69, others: vec![] }); + EraInfo::::set_exposure(0, &21, Exposure { total: 69, own: 69, others: vec![] }); >::insert( &20, StakingLedgerInspect { @@ -1753,7 +1765,7 @@ fn reward_to_stake_works() { total: 69, active: 69, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], }, ); @@ -1766,21 +1778,18 @@ fn reward_to_stake_works() { mock::start_active_era(1); mock::make_all_reward_payment(0); - assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); - assert_eq!(Staking::eras_stakers(active_era(), 21).total, 2000); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); let _11_balance = Balances::free_balance(&11); - let _21_balance = Balances::free_balance(&21); - assert_eq!(_11_balance, 1000 + total_payout_0 / 2); - assert_eq!(_21_balance, 2000 + total_payout_0 / 2); // Trigger another new era as the info are frozen before the era start. mock::start_active_era(2); // -- new infos - assert_eq!(Staking::eras_stakers(active_era(), 11).total, _11_balance); - assert_eq!(Staking::eras_stakers(active_era(), 21).total, _21_balance); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000 + total_payout_0 / 2); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000 + total_payout_0 / 2); }); } @@ -1807,7 +1816,7 @@ fn reap_stash_works() { // no easy way to cause an account to go below ED, we tweak their staking ledger // instead. 
- Ledger::::insert(11, StakingLedger::::new(11, 5, bounded_vec![])); + Ledger::::insert(11, StakingLedger::::new(11, 5)); // reap-able assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0)); @@ -1901,8 +1910,8 @@ fn wrong_vote_is_moot() { assert_eq_uvec!(validator_controllers(), vec![21, 11]); // our new voter is taken into account - assert!(Staking::eras_stakers(active_era(), 11).others.iter().any(|i| i.who == 61)); - assert!(Staking::eras_stakers(active_era(), 21).others.iter().any(|i| i.who == 61)); + assert!(Staking::eras_stakers(active_era(), &11).others.iter().any(|i| i.who == 61)); + assert!(Staking::eras_stakers(active_era(), &21).others.iter().any(|i| i.who == 61)); }); } @@ -1935,7 +1944,7 @@ fn bond_with_no_staked_value() { active: 0, total: 5, unlocking: bounded_vec![UnlockChunk { value: 5, era: 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -1989,9 +1998,9 @@ fn bond_with_little_staked_value_bounded() { mock::start_active_era(1); mock::make_all_reward_payment(0); - // 2 is elected. + // 1 is elected. assert_eq_uvec!(validator_controllers(), vec![21, 11, 1]); - assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), &2).total, 0); // Old ones are rewarded. assert_eq_error_rate!( @@ -2009,7 +2018,7 @@ fn bond_with_little_staked_value_bounded() { mock::make_all_reward_payment(1); assert_eq_uvec!(validator_controllers(), vec![21, 11, 1]); - assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), &2).total, 0); // 2 is now rewarded. assert_eq_error_rate!( @@ -2160,8 +2169,8 @@ fn phragmen_should_not_overflow() { assert_eq_uvec!(validator_controllers(), vec![3, 5]); // We can safely convert back to values within [u64, u128]. - assert!(Staking::eras_stakers(active_era(), 3).total > Votes::max_value() as Balance); - assert!(Staking::eras_stakers(active_era(), 5).total > Votes::max_value() as Balance); + assert!(Staking::eras_stakers(active_era(), &3).total > Votes::max_value() as Balance); + assert!(Staking::eras_stakers(active_era(), &5).total > Votes::max_value() as Balance); }) } @@ -2185,10 +2194,9 @@ fn reward_validator_slashing_validator_does_not_overflow() { // Check reward ErasRewardPoints::::insert(0, reward); - ErasStakers::::insert(0, 11, &exposure); - ErasStakersClipped::::insert(0, 11, exposure); + EraInfo::::set_exposure(0, &11, exposure); ErasValidatorReward::::insert(0, stake); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); assert_eq!(Balances::total_balance(&11), stake * 2); // Set staker @@ -2198,9 +2206,9 @@ fn reward_validator_slashing_validator_does_not_overflow() { // only slashes out of bonded stake are applied. without this line, it is 0. 
Staking::bond(RuntimeOrigin::signed(2), stake - 1, RewardDestination::default()).unwrap(); // Override exposure of 11 - ErasStakers::::insert( + EraInfo::::set_exposure( 0, - 11, + &11, Exposure { total: stake, own: 1, @@ -2211,7 +2219,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { // Check slashing on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(100)], @@ -2310,7 +2318,7 @@ fn offence_forces_new_era() { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(5)], @@ -2328,7 +2336,7 @@ fn offence_ensures_new_era_without_clobbering() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(5)], @@ -2346,7 +2354,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2367,7 +2375,7 @@ fn slashing_performed_according_exposure() { // This test checks that slashing is performed according the exposure (or more precisely, // historical exposure), not the current balance. ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::eras_stakers(active_era(), 11).own, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Handle an offence with a historical exposure. on_offence_now( @@ -2393,7 +2401,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2416,7 +2424,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2432,7 +2440,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], // NOTE: A 100% slash here would clean up the account, causing de-registration. @@ -2459,11 +2467,11 @@ fn reporters_receive_their_slice() { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; - assert_eq!(Staking::eras_stakers(active_era(), 11).total, initial_balance); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1, 2], }], &[Perbill::from_percent(50)], @@ -2486,11 +2494,11 @@ fn subsequent_reports_in_same_span_pay_out_less() { // The reporters' reward is calculated from the total exposure. 
let initial_balance = 1125; - assert_eq!(Staking::eras_stakers(active_era(), 11).total, initial_balance); + assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1], }], &[Perbill::from_percent(20)], @@ -2503,7 +2511,7 @@ fn subsequent_reports_in_same_span_pay_out_less() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1], }], &[Perbill::from_percent(50)], @@ -2525,7 +2533,7 @@ fn invulnerables_are_not_slashed() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); - let exposure = Staking::eras_stakers(active_era(), 21); + let exposure = Staking::eras_stakers(active_era(), &21); let initial_balance = Staking::slashable_balance_of(&21); let nominator_balances: Vec<_> = @@ -2534,11 +2542,11 @@ fn invulnerables_are_not_slashed() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }, OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }, ], @@ -2568,7 +2576,7 @@ fn dont_slash_if_fraction_is_zero() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2589,7 +2597,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(50)], @@ -2601,7 +2609,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(25)], @@ -2612,7 +2620,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(60)], @@ -2634,7 +2642,7 @@ fn garbage_collection_after_slashing() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2646,7 +2654,7 @@ fn garbage_collection_after_slashing() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(100)], @@ -2683,12 +2691,15 @@ fn garbage_collection_on_window_pruning() { assert_eq!(Balances::free_balance(11), 1000); let now = active_era(); - let exposure = Staking::eras_stakers(now, 11); + let exposure = Staking::eras_stakers(now, &11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( - &[OffenceDetails { offender: (11, Staking::eras_stakers(now, 11)), reporters: vec![] }], + &[OffenceDetails { + offender: (11, 
Staking::eras_stakers(now, &11)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); @@ -2723,14 +2734,14 @@ fn slashing_nominators_by_span_max() { assert_eq!(Balances::free_balance(101), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); - let exposure_11 = Staking::eras_stakers(active_era(), 11); - let exposure_21 = Staking::eras_stakers(active_era(), 21); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2757,7 +2768,7 @@ fn slashing_nominators_by_span_max() { // second slash: higher era, higher value, same span. on_offence_in_era( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(30)], @@ -2779,7 +2790,7 @@ fn slashing_nominators_by_span_max() { // in-era value, but lower slash value than slash 2. on_offence_in_era( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(20)], @@ -2814,7 +2825,7 @@ fn slashes_are_summed_across_spans() { on_offence_now( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2837,7 +2848,7 @@ fn slashes_are_summed_across_spans() { on_offence_now( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2861,7 +2872,7 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -2869,7 +2880,7 @@ fn deferred_slashes_are_deferred() { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -2920,7 +2931,7 @@ fn retroactive_deferred_slashes_two_eras_before() { assert_eq!(BondingDuration::get(), 3); mock::start_active_era(1); - let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); mock::start_active_era(3); @@ -2956,7 +2967,7 @@ fn retroactive_deferred_slashes_one_before() { assert_eq!(BondingDuration::get(), 3); mock::start_active_era(1); - let exposure_11_at_era1 = Staking::eras_stakers(active_era(), 11); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); // unbond at slash era. 
mock::start_active_era(2); @@ -3004,12 +3015,12 @@ fn staker_cannot_bail_deferred_slash() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -3028,7 +3039,7 @@ fn staker_cannot_bail_deferred_slash() { active: 0, total: 500, stash: 101, - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], unlocking: bounded_vec![UnlockChunk { era: 4u32, value: 500 }], } ); @@ -3078,7 +3089,7 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -3154,7 +3165,7 @@ fn remove_multi_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -3164,7 +3175,7 @@ fn remove_multi_deferred() { on_offence_now( &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), 21)), + offender: (21, Staking::eras_stakers(active_era(), &21)), reporters: vec![], }], &[Perbill::from_percent(10)], @@ -3529,8 +3540,9 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { mock::start_active_era(1); Pallet::::reward_by_ids(vec![(11, 1)]); - // Change total issuance in order to modify total payout + // Increase total token issuance to affect the total payout. let _ = Balances::deposit_creating(&999, 1_000_000_000); + // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); assert!(total_payout_1 != total_payout_0); @@ -3538,7 +3550,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { mock::start_active_era(2); Pallet::::reward_by_ids(vec![(11, 1)]); - // Change total issuance in order to modify total payout + // Increase total token issuance to affect the total payout. 
let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); @@ -3555,19 +3567,19 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // Last kept is 1: assert!(current_era - HistoryDepth::get() == 1); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0), // Fail: Era out of history Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0), // Fail: Double claim Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, active_era), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, active_era, 0), // Fail: Era not finished yet Error::::InvalidEraToReward.with_weight(err_weight) ); @@ -3593,7 +3605,7 @@ fn zero_slash_keeps_nominators() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), 11); + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -3672,9 +3684,10 @@ fn six_session_delay() { } #[test] -fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward() { +fn test_nominators_over_max_exposure_page_size_are_rewarded() { ExtBuilder::default().build_and_execute(|| { - for i in 0..=<::MaxNominatorRewardedPerValidator as Get<_>>::get() { + // bond one nominator more than the max exposure page size to validator 11. + for i in 0..=MaxExposurePageSize::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; Balances::make_free_balance_be(&stash, balance); @@ -3694,29 +3707,73 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( mock::start_active_era(2); mock::make_all_reward_payment(1); - // Assert only nominators from 1 to Max are rewarded - for i in 0..=<::MaxNominatorRewardedPerValidator as Get<_>>::get() { + // Assert nominators from 1 to Max are rewarded + let mut i: u32 = 0; + while i < MaxExposurePageSize::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; - if stash == 10_000 { - assert!(Balances::free_balance(&stash) == balance); - } else { - assert!(Balances::free_balance(&stash) > balance); - } + assert!(Balances::free_balance(&stash) > balance); + i += 1; } + + // Assert overflowing nominators from page 1 are also rewarded + let stash = 10_000 + i as AccountId; + assert!(Balances::free_balance(&stash) > (10_000 + i) as Balance); }); } #[test] -fn test_payout_stakers() { - // Test that payout_stakers work in general, including that only the top - // `T::MaxNominatorRewardedPerValidator` nominators are rewarded. 
+fn test_nominators_are_rewarded_for_all_exposure_page() {
+	ExtBuilder::default().build_and_execute(|| {
+		// 3 pages of exposure
+		let nominator_count = 2 * MaxExposurePageSize::get() + 1;
+
+		for i in 0..nominator_count {
+			let stash = 10_000 + i as AccountId;
+			let balance = 10_000 + i as Balance;
+			Balances::make_free_balance_be(&stash, balance);
+			assert_ok!(Staking::bond(
+				RuntimeOrigin::signed(stash),
+				balance,
+				RewardDestination::Stash
+			));
+			assert_ok!(Staking::nominate(RuntimeOrigin::signed(stash), vec![11]));
+		}
+		mock::start_active_era(1);
+
+		Pallet::<Test>::reward_by_ids(vec![(11, 1)]);
+		// compute and ensure the reward amount is greater than zero.
+		let _ = current_total_payout_for_duration(reward_time_per_era());
+
+		mock::start_active_era(2);
+		mock::make_all_reward_payment(1);
+
+		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 3);
+
+		// Assert all nominators are rewarded according to their stake
+		for i in 0..nominator_count {
+			// balance of the nominator after the reward payout.
+			let current_balance = Balances::free_balance(&((10000 + i) as AccountId));
+			// balance of the nominator in the previous iteration.
+			let previous_balance = Balances::free_balance(&((10000 + i - 1) as AccountId));
+			// balance before the reward.
+			let original_balance = 10_000 + i as Balance;
+
+			assert!(current_balance > original_balance);
+			// since the stake of each nominator increases with `i`, the final balance
+			// after the reward should also be higher than in the previous iteration.
+			assert!(current_balance > previous_balance);
+		}
+	});
+}
+
+#[test]
+fn test_multi_page_payout_stakers_by_page() {
+	// Test that `payout_stakers_by_page` works in general and pays the correct amount of reward.
 	ExtBuilder::default().has_stakers(false).build_and_execute(|| {
 		let balance = 1000;
 		// Track the exposure of the validator and all nominators.
 		let mut total_exposure = balance;
-		// Track the exposure of the validator and the nominators that will get paid out.
-		let mut payout_exposure = balance;
 		// Create a validator:
 		bond_validator(11, balance); // Default(64)
 		assert_eq!(Validators::<Test>::count(), 1);
@@ -3725,54 +3782,86 @@ fn test_multi_page_payout_stakers_by_page() {
 		for i in 0..100 {
 			let bond_amount = balance + i as Balance;
 			bond_nominator(1000 + i, bond_amount, vec![11]);
+			// with multi-page reward payout, the payout exposure is the same as the total
+			// exposure.
 			total_exposure += bond_amount;
-			if i >= 36 {
-				payout_exposure += bond_amount;
-			};
 		}
-		let payout_exposure_part = Perbill::from_rational(payout_exposure, total_exposure);
 
 		mock::start_active_era(1);
 		Staking::reward_by_ids(vec![(11, 1)]);
 
+		// Since `MaxExposurePageSize = 64`, there are two pages of validator exposure.
+		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 2);
+
 		// compute and ensure the reward amount is greater than zero.
 		let payout = current_total_payout_for_duration(reward_time_per_era());
-		let actual_paid_out = payout_exposure_part * payout;
-		mock::start_active_era(2);
+		mock::start_active_era(2);
+
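Before the page-by-page assertions that follow, one invariant worth keeping in mind. This is a hypothetical cross-check in the same mock style, using the storage items added by this PR; it is not an assertion the test itself makes:

```rust
// The overview metadata should reconcile with the sum of its pages:
// `total` equals the validator's own stake plus every page's `page_total`.
let overview = ErasStakersOverview::<Test>::get(1, 11).unwrap();
let paged_total: Balance = (0..overview.page_count)
    .filter_map(|page| ErasStakersPaged::<Test>::get((1, 11, page)))
    .map(|exposure_page| exposure_page.page_total)
    .sum();
assert_eq!(overview.total, overview.own + paged_total);
```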
+		// verify the exposures are calculated correctly.
+		let actual_exposure_0 = EraInfo::<Test>::get_paged_exposure(1, &11, 0).unwrap();
+		assert_eq!(actual_exposure_0.total(), total_exposure);
+		assert_eq!(actual_exposure_0.own(), 1000);
+		assert_eq!(actual_exposure_0.others().len(), 64);
+		let actual_exposure_1 = EraInfo::<Test>::get_paged_exposure(1, &11, 1).unwrap();
+		assert_eq!(actual_exposure_1.total(), total_exposure);
+		// own stake is only included once in the first page
+		assert_eq!(actual_exposure_1.own(), 0);
+		assert_eq!(actual_exposure_1.others().len(), 100 - 64);
+
 		let pre_payout_total_issuance = Balances::total_issuance();
 		RewardOnUnbalanceWasCalled::set(false);
-		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1));
-		assert_eq_error_rate!(
-			Balances::total_issuance(),
-			pre_payout_total_issuance + actual_paid_out,
-			1
-		);
-		assert!(RewardOnUnbalanceWasCalled::get());
+		System::reset_events();
+
+		let controller_balance_before_p0_payout = Balances::free_balance(&11);
+		// Pay out rewards for the first exposure page
+		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0));
 
-		// `Rewarded` events are being executed.
+		// verify `Rewarded` events are emitted
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
 				..,
-				Event::Rewarded { stash: 1037, dest: RewardDestination::Controller, amount: 108 },
-				Event::Rewarded { stash: 1036, dest: RewardDestination::Controller, amount: 108 }
+				Event::Rewarded { stash: 1063, dest: RewardDestination::Controller, amount: 111 },
+				Event::Rewarded { stash: 1064, dest: RewardDestination::Controller, amount: 111 },
 			]
 		));
+		let controller_balance_after_p0_payout = Balances::free_balance(&11);
+
+		// verify rewards have been paid out but still some left
+		assert!(Balances::total_issuance() > pre_payout_total_issuance);
+		assert!(Balances::total_issuance() < pre_payout_total_issuance + payout);
+
+		// verify the validator has been rewarded
+		assert!(controller_balance_after_p0_payout > controller_balance_before_p0_payout);
+
+		// Pay out the second and last page of nominators
+		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 1));
+
+		// verify `Rewarded` events are emitted for the second page.
+		let events = staking_events_since_last_call();
+		assert!(matches!(
+			events.as_slice(),
+			&[
+				Event::PayoutStarted { era_index: 1, validator_stash: 11 },
+				Event::Rewarded { stash: 1065, dest: RewardDestination::Controller, amount: 111 },
+				Event::Rewarded { stash: 1066, dest: RewardDestination::Controller, amount: 111 },
+				..
+			]
+		));
+		// verify the validator was not rewarded the second time
+		assert_eq!(Balances::free_balance(&11), controller_balance_after_p0_payout);
+
+		// verify all rewards have been paid out
+		assert_eq_error_rate!(Balances::total_issuance(), pre_payout_total_issuance + payout, 2);
+		assert!(RewardOnUnbalanceWasCalled::get());
+
 		// All 100 nominators of validator 11 are automatically paid out, including the validator
-		// Validator payout goes to controller.
assert!(Balances::free_balance(&11) > balance); - for i in 36..100 { + for i in 0..100 { assert!(Balances::free_balance(&(1000 + i)) > balance + i as Balance); } - // The bottom 36 do not - for i in 0..36 { - assert_eq!(Balances::free_balance(&(1000 + i)), balance + i as Balance); - } - // We track rewards in `claimed_rewards` vec + // verify we no longer track rewards in `legacy_claimed_rewards` vec assert_eq!( Staking::ledger(11.into()).unwrap(), StakingLedgerInspect { @@ -3780,30 +3869,207 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![1] + legacy_claimed_rewards: bounded_vec![] } ); + // verify rewards are tracked to prevent double claims + let ledger = Staking::ledger(11.into()); + for page in 0..EraInfo::::get_page_count(1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + ledger.as_ref().unwrap(), + &11, + page + ), + true + ); + } + for i in 3..16 { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. let payout = current_total_payout_for_duration(reward_time_per_era()); - let actual_paid_out = payout_exposure_part * payout; let pre_payout_total_issuance = Balances::total_issuance(); mock::start_active_era(i); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, i - 1)); + mock::make_all_reward_payment(i - 1); assert_eq_error_rate!( Balances::total_issuance(), - pre_payout_total_issuance + actual_paid_out, - 1 + pre_payout_total_issuance + payout, + 2 ); assert!(RewardOnUnbalanceWasCalled::get()); + + // verify we track rewards for each era and page + for page in 0..EraInfo::::get_page_count(i - 1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + i - 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + page + ), + true + ); + } + } + + assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); + + let last_era = 99; + let history_depth = HistoryDepth::get(); + let last_reward_era = last_era - 1; + let first_claimable_reward_era = last_era - history_depth; + for i in 16..=last_era { + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); + } + + // verify we clean up history as we go + for era in 0..15 { + assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); + } + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era, + 0 + )); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era, + 1 + )); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 0 + )); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 1 + )); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0, 1]); + + // Out of order claims works. 
+ assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 69, 0)); + assert_eq!(Staking::claimed_rewards(69, &11), vec![0]); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 23, 1)); + assert_eq!(Staking::claimed_rewards(23, &11), vec![1]); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 42, 0)); + assert_eq!(Staking::claimed_rewards(42, &11), vec![0]); + }); +} + +#[test] +fn test_multi_page_payout_stakers_backward_compatible() { + // Test that payout_stakers work in general and that it pays the correct amount of reward. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Track the exposure of the validator and all nominators. + let mut total_exposure = balance; + // Create a validator: + bond_validator(11, balance); // Default(64) + assert_eq!(Validators::::count(), 1); + + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + // Create nominators, targeting stash of validators + for i in 0..100 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + // with multi page reward payout, payout exposure is same as total exposure. + total_exposure += bond_amount; } - // We track rewards in `claimed_rewards` vec + mock::start_active_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + + // Since `MaxExposurePageSize = 64`, there are two pages of validator exposure. + assert_eq!(EraInfo::::get_page_count(1, &11), 2); + + // compute and ensure the reward amount is greater than zero. + let payout = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); + + // verify the exposures are calculated correctly. + let actual_exposure_0 = EraInfo::::get_paged_exposure(1, &11, 0).unwrap(); + assert_eq!(actual_exposure_0.total(), total_exposure); + assert_eq!(actual_exposure_0.own(), 1000); + assert_eq!(actual_exposure_0.others().len(), 64); + let actual_exposure_1 = EraInfo::::get_paged_exposure(1, &11, 1).unwrap(); + assert_eq!(actual_exposure_1.total(), total_exposure); + // own stake is only included once in the first page + assert_eq!(actual_exposure_1.own(), 0); + assert_eq!(actual_exposure_1.others().len(), 100 - 64); + + let pre_payout_total_issuance = Balances::total_issuance(); + RewardOnUnbalanceWasCalled::set(false); + + let controller_balance_before_p0_payout = Balances::free_balance(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + // page 0 is claimed + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + let controller_balance_after_p0_payout = Balances::free_balance(&11); + + // verify rewards have been paid out but still some left + assert!(Balances::total_issuance() > pre_payout_total_issuance); + assert!(Balances::total_issuance() < pre_payout_total_issuance + payout); + + // verify the validator has been rewarded + assert!(controller_balance_after_p0_payout > controller_balance_before_p0_payout); + + // This should payout the second and last page of nominators + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + + // cannot claim any more pages + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // verify the validator was not rewarded the second time + assert_eq!(Balances::free_balance(&11), 
controller_balance_after_p0_payout); + + // verify all rewards have been paid out + assert_eq_error_rate!(Balances::total_issuance(), pre_payout_total_issuance + payout, 2); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify all nominators of validator 11 are paid out, including the validator + // Validator payout goes to controller. + assert!(Balances::free_balance(&11) > balance); + for i in 0..100 { + assert!(Balances::free_balance(&(1000 + i)) > balance + i as Balance); + } + + // verify we no longer track rewards in `legacy_claimed_rewards` vec + let ledger = Staking::ledger(11.into()); assert_eq!( Staking::ledger(11.into()).unwrap(), StakingLedgerInspect { @@ -3811,14 +4077,60 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (1..=14).collect::>().try_into().unwrap() + legacy_claimed_rewards: bounded_vec![] } ); + // verify rewards are tracked to prevent double claims + for page in 0..EraInfo::::get_page_count(1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + ledger.as_ref().unwrap(), + &11, + page + ), + true + ); + } + + for i in 3..16 { + Staking::reward_by_ids(vec![(11, 1)]); + + // compute and ensure the reward amount is greater than zero. + let payout = current_total_payout_for_duration(reward_time_per_era()); + let pre_payout_total_issuance = Balances::total_issuance(); + + mock::start_active_era(i); + RewardOnUnbalanceWasCalled::set(false); + mock::make_all_reward_payment(i - 1); + assert_eq_error_rate!( + Balances::total_issuance(), + pre_payout_total_issuance + payout, + 2 + ); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify we track rewards for each era and page + for page in 0..EraInfo::::get_page_count(i - 1, &11) { + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + i - 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + page + ), + true + ); + } + } + + assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); + let last_era = 99; let history_depth = HistoryDepth::get(); - let expected_last_reward_era = last_era - 1; - let expected_start_reward_era = last_era - history_depth; + let last_reward_era = last_era - 1; + let first_claimable_reward_era = last_era - history_depth; for i in 16..=last_era { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. 
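Condensing the backward-compatible behaviour exercised above into one fragment (same mock and the same `err_weight` as in this test; a sketch, not additional test code): with two exposure pages for era 1, two legacy calls claim pages 0 and 1 in order, and a third call must fail.

```rust
// First call claims page 0, second claims page 1 (lowest unclaimed first).
assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1));
assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1));
// Every page for era 1 is now claimed, so another call is rejected.
assert_noop!(
    Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1),
    Error::<Test>::AlreadyClaimed.with_weight(err_weight)
);
```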
@@ -3826,48 +4138,125 @@ fn test_payout_stakers() { mock::start_active_era(i); } - // We clean it up as history passes + // verify we clean up history as we go + for era in 0..15 { + assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); + } + + // verify only page 0 is marked as claimed assert_ok!(Staking::payout_stakers( RuntimeOrigin::signed(1337), 11, - expected_start_reward_era + first_claimable_reward_era )); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers( RuntimeOrigin::signed(1337), 11, - expected_last_reward_era + first_claimable_reward_era, )); - assert_eq!( - Staking::ledger(11.into()).unwrap(), - StakingLedgerInspect { - stash: 11, - total: 1000, - active: 1000, - unlocking: Default::default(), - claimed_rewards: bounded_vec![expected_start_reward_era, expected_last_reward_era] - } + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); + + // change order and verify only page 1 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 1 + )); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1]); + + // verify page 0 is claimed even when explicit page is not passed + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era,)); + + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1, 0]); + + // cannot claim any more pages + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era), + Error::::AlreadyClaimed.with_weight(err_weight) ); + // Create 4 nominator pages + for i in 100..200 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + let test_era = last_era + 1; + mock::start_active_era(test_era); + + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(test_era + 1); + // Out of order claims works. 
-		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 69));
-		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 23));
-		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 42));
-		assert_eq!(
-			Staking::ledger(11.into()).unwrap(),
-			StakingLedgerInspect {
-				stash: 11,
-				total: 1000,
-				active: 1000,
-				unlocking: Default::default(),
-				claimed_rewards: bounded_vec![
-					expected_start_reward_era,
-					23,
-					42,
-					69,
-					expected_last_reward_era
-				]
-			}
+		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2));
+		assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2]);
+
+		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era));
+		assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0]);
+
+		// cannot claim page 2 again
+		assert_noop!(
+			Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2),
+			Error::<Test>::AlreadyClaimed.with_weight(err_weight)
 		);
+
+		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era));
+		assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1]);
+
+		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era));
+		assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1, 3]);
+	});
+}
+
+#[test]
+fn test_page_count_and_size() {
+	// Test that exposure pages are created with the expected count and size.
+	ExtBuilder::default().has_stakers(false).build_and_execute(|| {
+		let balance = 1000;
+		// Create a validator:
+		bond_validator(11, balance); // Default(64)
+		assert_eq!(Validators::<Test>::count(), 1);
+
+		// Create nominators, targeting stash of validators
+		for i in 0..100 {
+			let bond_amount = balance + i as Balance;
+			bond_nominator(1000 + i, bond_amount, vec![11]);
+		}
+
+		mock::start_active_era(1);
+
+		// Since max exposure page size is 64, 2 pages of nominators are created.
+		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 2);
+
+		// first page has 64 nominators
+		assert_eq!(EraInfo::<Test>::get_paged_exposure(1, &11, 0).unwrap().others().len(), 64);
+		// second page has 36 nominators
+		assert_eq!(EraInfo::<Test>::get_paged_exposure(1, &11, 1).unwrap().others().len(), 36);
+
+		// now let's decrease the page size
+		MaxExposurePageSize::set(32);
+		mock::start_active_era(2);
+		// now we expect 4 pages.
+		assert_eq!(EraInfo::<Test>::get_page_count(2, &11), 4);
+		// the first 3 pages have 32 nominators each; the last page has the remaining 4
+		assert_eq!(EraInfo::<Test>::get_paged_exposure(2, &11, 0).unwrap().others().len(), 32);
+		assert_eq!(EraInfo::<Test>::get_paged_exposure(2, &11, 1).unwrap().others().len(), 32);
+		assert_eq!(EraInfo::<Test>::get_paged_exposure(2, &11, 2).unwrap().others().len(), 32);
+		assert_eq!(EraInfo::<Test>::get_paged_exposure(2, &11, 3).unwrap().others().len(), 4);
+
+		// now let's decrease the page size even more
+		MaxExposurePageSize::set(5);
+		mock::start_active_era(3);
+
+		// now we expect the max 20 pages (100/5).
+ assert_eq!(EraInfo::::get_page_count(3, &11), 20); }); } @@ -3897,12 +4286,12 @@ fn payout_stakers_handles_basic_errors() { // Wrong Era, too big assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0), Error::::InvalidEraToReward.with_weight(err_weight) ); // Wrong Staker assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 10, 1), + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 10, 1, 0), Error::::NotStash.with_weight(err_weight) ); @@ -3922,33 +4311,137 @@ fn payout_stakers_handles_basic_errors() { // to payout era starting from expected_start_reward_era=19 through // expected_last_reward_era=98 (80 total eras), but not 18 or 99. assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era - 1), + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era - 1, + 0 + ), Error::::InvalidEraToReward.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era + 1), + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era + 1, + 0 + ), Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers( + assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), 11, - expected_start_reward_era + expected_start_reward_era, + 0 )); - assert_ok!(Staking::payout_stakers( + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 0 + )); + + // can call page 1 + assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), 11, - expected_last_reward_era + expected_last_reward_era, + 1 )); // Can't claim again assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era), + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era, + 0 + ), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 0 + ), Error::::AlreadyClaimed.with_weight(err_weight) ); + assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era), + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 1 + ), Error::::AlreadyClaimed.with_weight(err_weight) ); + + // invalid page + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 2 + ), + Error::::InvalidPage.with_weight(err_weight) + ); + }); +} + +#[test] +fn test_commission_paid_across_pages() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1; + let commission = 50; + // Create a validator: + bond_validator(11, balance); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(commission), blocked: false } + )); + assert_eq!(Validators::::count(), 1); + + // Create nominators, targeting stash of validators + for i in 0..200 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + mock::start_active_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + + // Since `MaxExposurePageSize = 64`, there are four pages of validator + // exposure. 
+ assert_eq!(EraInfo::::get_page_count(1, &11), 4); + + // compute and ensure the reward amount is greater than zero. + let payout = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); + + let initial_balance = Balances::free_balance(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); + + let controller_balance_after_p0_payout = Balances::free_balance(&11); + + // some commission is paid + assert!(initial_balance < controller_balance_after_p0_payout); + + // payout all pages + for i in 1..4 { + let before_balance = Balances::free_balance(&11); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, i)); + let after_balance = Balances::free_balance(&11); + // some commission is paid for every page + assert!(before_balance < after_balance); + } + + assert_eq_error_rate!(Balances::free_balance(&11), initial_balance + payout / 2, 1,); }); } @@ -3957,8 +4450,7 @@ fn payout_stakers_handles_weight_refund() { // Note: this test relies on the assumption that `payout_stakers_alive_staked` is solely used by // `payout_stakers` to calculate the weight of each payout op. ExtBuilder::default().has_stakers(false).build_and_execute(|| { - let max_nom_rewarded = - <::MaxNominatorRewardedPerValidator as Get<_>>::get(); + let max_nom_rewarded = MaxExposurePageSize::get(); // Make sure the configured value is meaningful for our use. assert!(max_nom_rewarded >= 4); let half_max_nom_rewarded = max_nom_rewarded / 2; @@ -3994,7 +4486,11 @@ fn payout_stakers_handles_weight_refund() { start_active_era(2); // Collect payouts when there are no nominators - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 1, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); @@ -4007,7 +4503,11 @@ fn payout_stakers_handles_weight_refund() { start_active_era(3); // Collect payouts for an era where the validator did not receive any points. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 2, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); @@ -4020,7 +4520,11 @@ fn payout_stakers_handles_weight_refund() { start_active_era(4); // Collect payouts when the validator has `half_max_nom_rewarded` nominators. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 3, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); @@ -4043,14 +4547,22 @@ fn payout_stakers_handles_weight_refund() { start_active_era(6); // Collect payouts when the validator had `half_max_nom_rewarded` nominators. 
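// (The pattern this test repeats, in sketch form; these are the helpers
// already used below, not new API:
//
//     let info = call.get_dispatch_info();     // pre-dispatch, worst-case weight
//     let result = call.dispatch(origin);      // run the payout
//     let actual = extract_actual_weight(&result, &info); // refunded weight
// )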
- let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 5, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); // Try and collect payouts for an era that has already been collected. - let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = TestCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 5, + page: 0, + }); let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(20)); assert!(result.is_err()); @@ -4060,7 +4572,7 @@ fn payout_stakers_handles_weight_refund() { } #[test] -fn bond_during_era_correctly_populates_claimed_rewards() { +fn bond_during_era_does_not_populate_legacy_claimed_rewards() { ExtBuilder::default().has_stakers(false).build_and_execute(|| { // Era = None bond_validator(9, 1000); @@ -4071,7 +4583,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); mock::start_active_era(5); @@ -4083,13 +4595,12 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (0..5).collect::>().try_into().unwrap(), + legacy_claimed_rewards: bounded_vec![], } ); // make sure only era upto history depth is stored let current_era = 99; - let last_reward_era = 99 - HistoryDepth::get(); mock::start_active_era(current_era); bond_validator(13, 1000); assert_eq!( @@ -4099,10 +4610,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (last_reward_era..current_era) - .collect::>() - .try_into() - .unwrap(), + legacy_claimed_rewards: Default::default(), } ); }); @@ -4131,7 +4639,7 @@ fn offences_weight_calculated_correctly() { >, > = (1..10) .map(|i| OffenceDetails { - offender: (i, Staking::eras_stakers(active_era(), i)), + offender: (i, Staking::eras_stakers(active_era(), &i)), reporters: vec![], }) .collect(); @@ -4147,7 +4655,7 @@ fn offences_weight_calculated_correctly() { // On Offence with one offenders, Applied let one_offender = [OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![1], }]; @@ -4204,7 +4712,7 @@ fn payout_creates_controller() { // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(controller), 11, 1)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); // Controller is created assert!(Balances::free_balance(controller) > 0); @@ -4232,7 +4740,7 @@ fn payout_to_any_account_works() { // compute and ensure the reward amount is greater than zero. 
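// (In sketch form, what `payout_to_any_account_works` exercises: the payout
// caller, the otherwise unrelated signer 1337, is permissionless, and the
// reward is routed to whichever `RewardDestination` the staker configured,
// here a plain account, 42.)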
let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); // Payment is successful assert!(Balances::free_balance(42) > 0); @@ -4355,7 +4863,7 @@ fn cannot_rebond_to_lower_than_ed() { total: 11 * 1000, active: 11 * 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4369,7 +4877,7 @@ fn cannot_rebond_to_lower_than_ed() { total: 11 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 11 * 1000, era: 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4395,7 +4903,7 @@ fn cannot_bond_extra_to_lower_than_ed() { total: 11 * 1000, active: 11 * 1000, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4409,7 +4917,7 @@ fn cannot_bond_extra_to_lower_than_ed() { total: 11 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 11 * 1000, era: 3 }], - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4436,7 +4944,7 @@ fn do_not_die_when_active_is_ed() { total: 1000 * ed, active: 1000 * ed, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); @@ -4453,7 +4961,7 @@ fn do_not_die_when_active_is_ed() { total: ed, active: ed, unlocking: Default::default(), - claimed_rewards: bounded_vec![], + legacy_claimed_rewards: bounded_vec![], } ); }) @@ -5528,7 +6036,7 @@ fn proportional_slash_stop_slashing_if_remaining_zero() { let unlocking = bounded_vec![c(1, 10), c(2, 10)]; // Given - let mut ledger = StakingLedger::::new(123, 20, bounded_vec![]); + let mut ledger = StakingLedger::::new(123, 20); ledger.total = 40; ledger.unlocking = unlocking; @@ -5542,7 +6050,7 @@ fn proportional_slash_stop_slashing_if_remaining_zero() { fn proportional_ledger_slash_works() { let c = |era, value| UnlockChunk:: { era, value }; // Given - let mut ledger = StakingLedger::::new(123, 10, bounded_vec![]); + let mut ledger = StakingLedger::::new(123, 10); assert_eq!(BondingDuration::get(), 3); // When we slash a ledger with no unlocking chunks @@ -5758,149 +6266,6 @@ fn proportional_ledger_slash_works() { ); } -#[test] -fn pre_bonding_era_cannot_be_claimed() { - // Verifies initial conditions of mock - ExtBuilder::default().nominate(false).build_and_execute(|| { - let history_depth = HistoryDepth::get(); - // jump to some era above history_depth - let mut current_era = history_depth + 10; - let last_reward_era = current_era - 1; - let start_reward_era = current_era - history_depth; - - // put some money in stash=3 and controller=4. - for i in 3..5 { - let _ = Balances::make_free_balance_be(&i, 2000); - } - - mock::start_active_era(current_era); - - // add a new candidate for being a validator. account 3 controlled by 4. 
- assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); - - let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(3.into()).unwrap(), - StakingLedgerInspect { - stash: 3, - total: 1500, - active: 1500, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // start next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // claiming reward for last era in which validator was active works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 1)); - - // consumed weight for all payout_stakers dispatches that fail - let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); - // cannot claim rewards for an era before bonding occured as it is - // already marked as claimed. - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 2), - Error::::AlreadyClaimed.with_weight(err_weight) - ); - - // decoding will fail now since Staking Ledger is in corrupt state - HistoryDepth::set(history_depth - 1); - assert!(Staking::ledger(4.into()).is_err()); - - // make sure stakers still cannot claim rewards that they are not meant to - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 2), - Error::::NotController - ); - - // fix the corrupted state for post conditions check - HistoryDepth::set(history_depth); - }); -} - -#[test] -fn reducing_history_depth_abrupt() { - // Verifies initial conditions of mock - ExtBuilder::default().nominate(false).build_and_execute(|| { - let original_history_depth = HistoryDepth::get(); - let mut current_era = original_history_depth + 10; - let last_reward_era = current_era - 1; - let start_reward_era = current_era - original_history_depth; - - // put some money in (stash, controller)=(3,3),(5,5). - for i in 3..7 { - let _ = Balances::make_free_balance_be(&i, 2000); - } - - // start current era - mock::start_active_era(current_era); - - // add a new candidate for being a staker. account 3 controlled by 3. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); - - // all previous era before the bonding action should be marked as - // claimed. 
- let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(3.into()).unwrap(), - StakingLedgerInspect { - stash: 3, - total: 1500, - active: 1500, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // claiming reward for last era in which validator was active works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 1)); - - // next era - current_era = current_era + 1; - mock::start_active_era(current_era); - - // history_depth reduced without migration - let history_depth = original_history_depth - 1; - HistoryDepth::set(history_depth); - // claiming reward does not work anymore - assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(3), 3, current_era - 1), - Error::::NotController - ); - - // new stakers can still bond - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1200, RewardDestination::Controller)); - - // new staking ledgers created will be bounded by the current history depth - let last_reward_era = current_era - 1; - let start_reward_era = current_era - history_depth; - let claimed_rewards: BoundedVec<_, _> = - (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); - assert_eq!( - Staking::ledger(5.into()).unwrap(), - StakingLedgerInspect { - stash: 5, - total: 1200, - active: 1200, - unlocking: Default::default(), - claimed_rewards, - } - ); - - // fix the corrupted state for post conditions check - HistoryDepth::set(original_history_depth); - }); -} - #[test] fn reducing_max_unlocking_chunks_abrupt() { // Concern is on validators only @@ -6047,6 +6412,282 @@ fn set_min_commission_works_with_admin_origin() { }) } +#[test] +fn can_page_exposure() { + let mut others: Vec> = vec![]; + let mut total_stake: Balance = 0; + // 19 nominators + for i in 1..20 { + let individual_stake: Balance = 100 * i as Balance; + others.push(IndividualExposure { who: i, value: individual_stake }); + total_stake += individual_stake; + } + let own_stake: Balance = 500; + total_stake += own_stake; + assert_eq!(total_stake, 19_500); + // build full exposure set + let exposure: Exposure = + Exposure { total: total_stake, own: own_stake, others }; + + // when + let (exposure_metadata, exposure_page): ( + PagedExposureMetadata, + Vec>, + ) = exposure.clone().into_pages(3); + + // then + // 7 pages of nominators. + assert_eq!(exposure_page.len(), 7); + assert_eq!(exposure_metadata.page_count, 7); + // first page stake = 100 + 200 + 300 + assert!(matches!(exposure_page[0], ExposurePage { page_total: 600, .. })); + // second page stake = 0 + 400 + 500 + 600 + assert!(matches!(exposure_page[1], ExposurePage { page_total: 1500, .. })); + // verify overview has the total + assert_eq!(exposure_metadata.total, 19_500); + // verify total stake is same as in the original exposure. + assert_eq!( + exposure_page.iter().map(|a| a.page_total).reduce(|a, b| a + b).unwrap(), + 19_500 - exposure_metadata.own + ); + // verify own stake is correct + assert_eq!(exposure_metadata.own, 500); + // verify number of nominators are same as in the original exposure. 
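// (Arithmetic for this case: 19 nominators at a page size of 3 gives
// ceil(19 / 3) = 7 pages, six full pages of 3 plus a final page of 1, so
// summing `others.len()` across pages must recover all 19 nominators.)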
+ assert_eq!(exposure_page.iter().map(|a| a.others.len()).reduce(|a, b| a + b).unwrap(), 19); + assert_eq!(exposure_metadata.nominator_count, 19); +} + +#[test] +fn should_retain_era_info_only_upto_history_depth() { + ExtBuilder::default().build_and_execute(|| { + // remove existing exposure + Pallet::::clear_era_information(0); + let validator_stash = 10; + + for era in 0..4 { + ClaimedRewards::::insert(era, &validator_stash, vec![0, 1, 2]); + for page in 0..3 { + ErasStakersPaged::::insert( + (era, &validator_stash, page), + ExposurePage { page_total: 100, others: vec![] }, + ); + } + } + + for i in 0..4 { + // Count of entries remaining in ClaimedRewards = total - cleared_count + assert_eq!(ClaimedRewards::::iter().count(), (4 - i)); + // 1 claimed_rewards entry for each era + assert_eq!(ClaimedRewards::::iter_prefix(i as EraIndex).count(), 1); + // 3 entries (pages) for each era + assert_eq!(ErasStakersPaged::::iter_prefix((i as EraIndex,)).count(), 3); + + // when clear era info + Pallet::::clear_era_information(i as EraIndex); + + // then all era entries are cleared + assert_eq!(ClaimedRewards::::iter_prefix(i as EraIndex).count(), 0); + assert_eq!(ErasStakersPaged::::iter_prefix((i as EraIndex,)).count(), 0); + } + }); +} + +#[test] +fn test_legacy_claimed_rewards_is_checked_at_reward_payout() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Create a validator: + bond_validator(11, 1000); + + // reward validator for next 2 eras + mock::start_active_era(1); + Pallet::::reward_by_ids(vec![(11, 1)]); + mock::start_active_era(2); + Pallet::::reward_by_ids(vec![(11, 1)]); + mock::start_active_era(3); + + //verify rewards are not claimed + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + false + ); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 2, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + false + ); + + // assume reward claim for era 1 was stored in legacy storage + Ledger::::insert( + 11, + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![1], + }, + ); + + // verify rewards for era 1 cannot be claimed + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0), + Error::::AlreadyClaimed + .with_weight(::WeightInfo::payout_stakers_alive_staked(0)), + ); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + true + ); + + // verify rewards for era 2 can be claimed + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 2, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + true + ); + // but the new claimed rewards for era 2 is not stored in legacy storage + assert_eq!( + Ledger::::get(11).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![1], + }, + ); + // instead it is kept in `ClaimedRewards` + assert_eq!(ClaimedRewards::::get(2, 11), vec![0]); + }); +} + +#[test] +fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // case 1: exposure exist in clipped. 
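// (Storage generations compared by the two cases in this test, using the
// names from this file: the paged form keeps one `ErasStakersPaged` entry
// per page, while the legacy form keeps a single, possibly clipped,
// exposure under `ErasStakers` / `ErasStakersClipped`.)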
+ // set page cap to 10 + MaxExposurePageSize::set(10); + bond_validator(11, 1000); + let mut expected_individual_exposures: Vec> = vec![]; + let mut total_exposure: Balance = 0; + // 1st exposure page + for i in 0..10 { + let who = 1000 + i; + let value = 1000 + i as Balance; + bond_nominator(who, value, vec![11]); + expected_individual_exposures.push(IndividualExposure { who, value }); + total_exposure += value; + } + + for i in 10..15 { + let who = 1000 + i; + let value = 1000 + i as Balance; + bond_nominator(who, value, vec![11]); + expected_individual_exposures.push(IndividualExposure { who, value }); + total_exposure += value; + } + + mock::start_active_era(1); + // reward validator for current era + Pallet::::reward_by_ids(vec![(11, 1)]); + + // start new era + mock::start_active_era(2); + // verify exposure for era 1 is stored in paged storage, that each exposure is stored in + // one and only one page, and no exposure is repeated. + let actual_exposure_page_0 = ErasStakersPaged::::get((1, 11, 0)).unwrap(); + let actual_exposure_page_1 = ErasStakersPaged::::get((1, 11, 1)).unwrap(); + expected_individual_exposures.iter().for_each(|exposure| { + assert!( + actual_exposure_page_0.others.contains(exposure) || + actual_exposure_page_1.others.contains(exposure) + ); + }); + assert_eq!( + expected_individual_exposures.len(), + actual_exposure_page_0.others.len() + actual_exposure_page_1.others.len() + ); + // verify `EraInfo` returns page from paged storage + assert_eq!( + EraInfo::::get_paged_exposure(1, &11, 0).unwrap().others(), + &actual_exposure_page_0.others + ); + assert_eq!( + EraInfo::::get_paged_exposure(1, &11, 1).unwrap().others(), + &actual_exposure_page_1.others + ); + assert_eq!(EraInfo::::get_page_count(1, &11), 2); + + // case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy). 
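// (The legacy clipped storage keeps only the top `MaxExposurePageSize`
// nominators; the setup below mimics that by sorting the 15 nominators and
// truncating to 10, which is why the legacy path can only ever serve page 0.)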
+ // delete paged storage and add exposure to clipped storage + >::remove((1, 11, 0)); + >::remove((1, 11, 1)); + >::remove(1, 11); + + >::insert( + 1, + 11, + Exposure { + total: total_exposure, + own: 1000, + others: expected_individual_exposures.clone(), + }, + ); + let mut clipped_exposure = expected_individual_exposures.clone(); + clipped_exposure.sort_by(|a, b| b.who.cmp(&a.who)); + clipped_exposure.truncate(10); + >::insert( + 1, + 11, + Exposure { total: total_exposure, own: 1000, others: clipped_exposure.clone() }, + ); + + // verify `EraInfo` returns exposure from clipped storage + let actual_exposure_paged = EraInfo::::get_paged_exposure(1, &11, 0).unwrap(); + assert_eq!(actual_exposure_paged.others(), &clipped_exposure); + assert_eq!(actual_exposure_paged.own(), 1000); + assert_eq!(actual_exposure_paged.exposure_metadata.page_count, 1); + + let actual_exposure_full = EraInfo::::get_full_exposure(1, &11); + assert_eq!(actual_exposure_full.others, expected_individual_exposures); + assert_eq!(actual_exposure_full.own, 1000); + assert_eq!(actual_exposure_full.total, total_exposure); + + // for pages other than 0, clipped storage returns empty exposure + assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1), None); + // page size is 1 for clipped storage + assert_eq!(EraInfo::::get_page_count(1, &11), 1); + + // payout for page 0 works + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); + // payout for page 1 fails + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 1), + Error::::InvalidPage + .with_weight(::WeightInfo::payout_stakers_alive_staked(0)) + ); + }); +} + mod staking_interface { use frame_support::storage::with_storage_layer; use sp_staking::StakingInterface; @@ -6076,7 +6717,7 @@ mod staking_interface { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), + offender: (11, Staking::eras_stakers(active_era(), &11)), reporters: vec![], }], &[Perbill::from_percent(100)], @@ -6217,7 +6858,7 @@ mod ledger { assert!(ledger.clone().bond(reward_dest).is_err()); // once bonded, update works as expected. - ledger.claimed_rewards = bounded_vec![1]; + ledger.legacy_claimed_rewards = bounded_vec![1]; assert_ok!(ledger.update()); }) } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index f2c65e677cac..ad6dbccde9f8 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_staking +//! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_staking -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/staking/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_staking +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/staking/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +47,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_staking. +/// Weight functions needed for `pallet_staking`. pub trait WeightInfo { fn bond() -> Weight; fn bond_extra() -> Weight; @@ -84,599 +81,615 @@ pub trait WeightInfo { fn set_min_commission() -> Weight; } -/// Weights for pallet_staking using the Substrate node and recommended hardware. +/// Weights for `pallet_staking` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `1047` + // Measured: `927` // Estimated: `4764` - // Minimum execution time: 53_983_000 picoseconds. - Weight::from_parts(55_296_000, 4764) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 42_811_000 picoseconds. 
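// (Reading these generated bodies: `Weight::from_parts(ref_time, proof_size)`
// carries the benchmarked execution time in picoseconds together with the
// PoV size bound from the `Estimated` line, and each
// `T::DbWeight::get().reads(n)` / `.writes(n)` term adds the runtime's
// configured per-access database cost on top.)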
+ Weight::from_parts(44_465_000, 4764) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn bond_extra() -> Weight { // Proof Size summary in bytes: - // Measured: `2028` + // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 96_590_000 picoseconds. - Weight::from_parts(98_921_000, 8877) + // Minimum execution time: 87_628_000 picoseconds. 
+ Weight::from_parts(90_020_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `2233` + // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 99_901_000 picoseconds. - Weight::from_parts(102_919_000, 8877) + // Minimum execution time: 91_655_000 picoseconds. 
+ Weight::from_parts(94_146_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1021` + // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 45_230_000 picoseconds. - Weight::from_parts(47_052_829, 4764) - // Standard Error: 1_044 - .saturating_add(Weight::from_parts(43_887, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 42_953_000 picoseconds. 
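// (For component-parameterised functions such as this one, the generated
// body is linear in the component: roughly base + s * slope, where the
// per-item slope is the second `Weight::from_parts(..)` below and
// `Standard Error` reports the uncertainty of the fitted slope.)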
+ Weight::from_parts(44_648_505, 4764) + // Standard Error: 937 + .saturating_add(Weight::from_parts(51_090, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 
3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2294 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 97_534_000 picoseconds. - Weight::from_parts(104_772_163, 6248) - // Standard Error: 3_674 - .saturating_add(Weight::from_parts(1_470_124, 0).saturating_mul(s.into())) + // Minimum execution time: 89_218_000 picoseconds. + Weight::from_parts(97_761_884, 6248) + // Standard Error: 3_888 + .saturating_add(Weight::from_parts(1_346_441, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:1 w:0) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:1 w:0) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:1) - /// Proof: Staking CounterForValidators (max_values: 
Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn validate() -> Weight { // Proof Size summary in bytes: - // Measured: `1414` + // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 57_467_000 picoseconds. - Weight::from_parts(59_437_000, 4556) + // Minimum execution time: 51_200_000 picoseconds. + Weight::from_parts(53_403_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:128 w:128) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1285 + k * (569 ±0)` + // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 32_857_000 picoseconds. - Weight::from_parts(37_116_967, 4556) - // Standard Error: 9_522 - .saturating_add(Weight::from_parts(8_796_167, 0).saturating_mul(k.into())) + // Minimum execution time: 28_963_000 picoseconds. 
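// (When a component also scales storage access, as here with one
// `Staking::Nominators` read and write per kicked nominator, the body
// additionally multiplies the `T::DbWeight` reads/writes by the component,
// via `reads((1_u64).saturating_mul(k.into()))` below.)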
+ Weight::from_parts(29_884_371, 4556) + // Standard Error: 9_063 + .saturating_add(Weight::from_parts(6_532_967, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:17 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: 
Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1908 + n * (102 ±0)` + // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 69_613_000 picoseconds. - Weight::from_parts(68_079_061, 6248) - // Standard Error: 18_554 - .saturating_add(Weight::from_parts(4_012_761, 0).saturating_mul(n.into())) + // Minimum execution time: 64_644_000 picoseconds. + Weight::from_parts(62_855_016, 6248) + // Standard Error: 17_528 + .saturating_add(Weight::from_parts(3_993_850, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { 
// Proof Size summary in bytes: - // Measured: `1748` + // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 60_430_000 picoseconds. - Weight::from_parts(62_702_000, 6248) + // Minimum execution time: 54_505_000 picoseconds. + Weight::from_parts(56_026_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `808` + // Measured: `902` // Estimated: `4556` - // Minimum execution time: 14_276_000 picoseconds. - Weight::from_parts(14_766_000, 4556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 16_639_000 picoseconds. + Weight::from_parts(17_342_000, 4556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:2) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: - // Measured: `907` - // Estimated: `8122` - // Minimum execution time: 21_710_000 picoseconds. - Weight::from_parts(22_430_000, 8122) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `902` + // Estimated: `4556` + // Minimum execution time: 20_334_000 picoseconds. + Weight::from_parts(21_067_000, 4556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Staking ValidatorCount (r:0 w:1) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_validator_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_970_000 picoseconds. - Weight::from_parts(3_120_000, 0) + // Minimum execution time: 2_680_000 picoseconds. 
+ Weight::from_parts(2_774_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_no_eras() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_362_000 picoseconds. - Weight::from_parts(9_785_000, 0) + // Minimum execution time: 8_613_000 picoseconds. + Weight::from_parts(8_922_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_275_000 picoseconds. - Weight::from_parts(9_678_000, 0) + // Minimum execution time: 8_657_000 picoseconds. + Weight::from_parts(9_020_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era_always() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_414_000 picoseconds. - Weight::from_parts(9_848_000, 0) + // Minimum execution time: 8_600_000 picoseconds. + Weight::from_parts(9_157_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Staking Invulnerables (r:0 w:1) - /// Proof Skipped: Staking Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_061_000 picoseconds. - Weight::from_parts(3_618_535, 0) - // Standard Error: 44 - .saturating_add(Weight::from_parts(10_774, 0).saturating_mul(v.into())) + // Minimum execution time: 2_792_000 picoseconds. 
+		Weight::from_parts(3_293_694, 0)
+		// Standard Error: 31
+			.saturating_add(Weight::from_parts(10_668, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
-	/// Storage: Staking Bonded (r:1 w:1)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking SlashingSpans (r:1 w:1)
-	/// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking Validators (r:1 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:1 w:1)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking CounterForNominators (r:1 w:1)
-	/// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:2 w:2)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:1 w:1)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList CounterForListNodes (r:1 w:1)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: System Account (r:1 w:1)
-	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:0 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:0 w:1)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: Staking SpanSlash (r:0 w:100)
-	/// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen)
+	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
+	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::Bonded` (r:1 w:1)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:1 w:1)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForNominators` (r:1 w:1)
+	/// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:2 w:2)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:1 w:1)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:1)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:0 w:1)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::SpanSlash` (r:0 w:100)
+	/// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 100]`.
 	fn force_unstake(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `2018 + s * (4 ±0)`
+		// Measured: `2196 + s * (4 ±0)`
 		// Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 87_914_000 picoseconds.
-		Weight::from_parts(95_688_129, 6248)
-		// Standard Error: 5_030
-			.saturating_add(Weight::from_parts(1_487_249, 0).saturating_mul(s.into()))
-			.saturating_add(T::DbWeight::get().reads(12_u64))
+		// Minimum execution time: 86_537_000 picoseconds.
+		Weight::from_parts(95_127_637, 6248)
+		// Standard Error: 3_902
+			.saturating_add(Weight::from_parts(1_336_182, 0).saturating_mul(s.into()))
+			.saturating_add(T::DbWeight::get().reads(13_u64))
 			.saturating_add(T::DbWeight::get().writes(12_u64))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
-	/// Storage: Staking UnappliedSlashes (r:1 w:1)
-	/// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Staking::UnappliedSlashes` (r:1 w:1)
+	/// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `s` is `[1, 1000]`.
 	fn cancel_deferred_slash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `66639`
-		// Estimated: `70104`
-		// Minimum execution time: 99_269_000 picoseconds.
-		Weight::from_parts(1_154_264_637, 70104)
-		// Standard Error: 76_592
-			.saturating_add(Weight::from_parts(6_490_888, 0).saturating_mul(s.into()))
+		// Measured: `66672`
+		// Estimated: `70137`
+		// Minimum execution time: 100_777_000 picoseconds.
+		Weight::from_parts(896_540_406, 70137)
+		// Standard Error: 57_788
+			.saturating_add(Weight::from_parts(4_870_910, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ErasValidatorReward (r:1 w:0)
-	/// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:257 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking ErasStakersClipped (r:1 w:0)
-	/// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking ErasRewardPoints (r:1 w:0)
-	/// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking ErasValidatorPrefs (r:1 w:0)
-	/// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:257 w:0)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: System Account (r:257 w:257)
-	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: `Staking::CurrentEra` (r:1 w:0)
+	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasStakersOverview` (r:1 w:0)
+	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasValidatorReward` (r:1 w:0)
+	/// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:257 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:258 w:258)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ClaimedRewards` (r:1 w:1)
+	/// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasStakersPaged` (r:1 w:0)
+	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasRewardPoints` (r:1 w:0)
+	/// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0)
+	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:257 w:0)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 256]`.
 	fn payout_stakers_dead_controller(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `20217 + n * (143 ±0)`
-		// Estimated: `19844 + n * (2603 ±1)`
-		// Minimum execution time: 91_767_000 picoseconds.
-		Weight::from_parts(146_781_264, 19844)
-		// Standard Error: 31_341
-			.saturating_add(Weight::from_parts(30_553_008, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(9_u64))
+		// Measured: `21644 + n * (155 ±0)`
+		// Estimated: `21412 + n * (2603 ±0)`
+		// Minimum execution time: 133_129_000 picoseconds.
+		Weight::from_parts(190_983_630, 21412)
+		// Standard Error: 17_497
+			.saturating_add(Weight::from_parts(24_723_153, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(14_u64))
 			.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into())))
-			.saturating_add(T::DbWeight::get().writes(2_u64))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into())))
 			.saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into()))
 	}
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ErasValidatorReward (r:1 w:0)
-	/// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:257 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:257 w:257)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking ErasStakersClipped (r:1 w:0)
-	/// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking ErasRewardPoints (r:1 w:0)
-	/// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking ErasValidatorPrefs (r:1 w:0)
-	/// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:257 w:0)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: System Account (r:257 w:257)
-	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:257 w:257)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:257 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+	/// Storage: `Staking::Bonded` (r:257 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:257 w:257)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasStakersClipped` (r:1 w:0)
+	/// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasStakersOverview` (r:1 w:0)
+	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ClaimedRewards` (r:1 w:1)
+	/// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::CurrentEra` (r:1 w:0)
+	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasValidatorReward` (r:1 w:0)
+	/// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:257 w:257)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:257 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:257 w:257)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasStakersPaged` (r:1 w:0)
+	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasRewardPoints` (r:1 w:0)
+	/// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0)
+	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:257 w:0)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 256]`.
 	fn payout_stakers_alive_staked(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `33190 + n * (377 ±0)`
-		// Estimated: `30845 + n * (3774 ±0)`
-		// Minimum execution time: 121_303_000 picoseconds.
-		Weight::from_parts(151_046_907, 30845)
-		// Standard Error: 41_899
-			.saturating_add(Weight::from_parts(49_837_804, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(11_u64))
+		// Measured: `33297 + n * (377 ±0)`
+		// Estimated: `30944 + n * (3774 ±3)`
+		// Minimum execution time: 149_773_000 picoseconds.
+		Weight::from_parts(151_527_124, 30944)
+		// Standard Error: 24_152
+			.saturating_add(Weight::from_parts(46_124_074, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(14_u64))
 			.saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into())))
 			.saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into()))
 	}
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:3 w:3)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:1 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:2 w:2)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:3 w:3)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:2 w:2)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	/// The range of component `l` is `[1, 32]`.
 	fn rebond(l: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `2029 + l * (7 ±0)`
+		// Measured: `1991 + l * (7 ±0)`
 		// Estimated: `8877`
-		// Minimum execution time: 90_068_000 picoseconds.
-		Weight::from_parts(93_137_456, 8877)
-		// Standard Error: 4_799
-			.saturating_add(Weight::from_parts(54_421, 0).saturating_mul(l.into()))
+		// Minimum execution time: 81_618_000 picoseconds.
+		Weight::from_parts(85_245_630, 8877)
+		// Standard Error: 5_049
+			.saturating_add(Weight::from_parts(39_811, 0).saturating_mul(l.into()))
 			.saturating_add(T::DbWeight::get().reads(9_u64))
 			.saturating_add(T::DbWeight::get().writes(7_u64))
 	}
-	/// Storage: Staking Bonded (r:1 w:1)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking SlashingSpans (r:1 w:1)
-	/// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking Validators (r:1 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:1 w:1)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking CounterForNominators (r:1 w:1)
-	/// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:2 w:2)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:1 w:1)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList CounterForListNodes (r:1 w:1)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:0 w:1)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: Staking SpanSlash (r:0 w:100)
-	/// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen)
+	/// Storage: `Staking::Bonded` (r:1 w:1)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
+	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:1 w:1)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForNominators` (r:1 w:1)
+	/// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:2 w:2)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:1 w:1)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:1)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:0 w:1)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::SpanSlash` (r:0 w:100)
+	/// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[1, 100]`.
 	fn reap_stash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `2294 + s * (4 ±0)`
+		// Measured: `2196 + s * (4 ±0)`
 		// Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 103_139_000 picoseconds.
-		Weight::from_parts(107_036_296, 6248)
-		// Standard Error: 3_935
-			.saturating_add(Weight::from_parts(1_465_860, 0).saturating_mul(s.into()))
+		// Minimum execution time: 95_395_000 picoseconds.
+		Weight::from_parts(100_459_234, 6248)
+		// Standard Error: 3_781
+			.saturating_add(Weight::from_parts(1_333_607, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(12_u64))
 			.saturating_add(T::DbWeight::get().writes(11_u64))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
-	/// Storage: VoterList CounterForListNodes (r:1 w:0)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:200 w:0)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:110 w:0)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:110 w:0)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking Validators (r:11 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:110 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:110 w:0)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking CounterForValidators (r:1 w:0)
-	/// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ValidatorCount (r:1 w:0)
-	/// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking MinimumValidatorCount (r:1 w:0)
-	/// Proof: Staking MinimumValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking CurrentEra (r:1 w:1)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ErasStakersClipped (r:0 w:10)
-	/// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking ErasValidatorPrefs (r:0 w:10)
-	/// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen)
-	/// Storage: Staking ErasStakers (r:0 w:10)
-	/// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking ErasTotalStake (r:0 w:1)
-	/// Proof: Staking ErasTotalStake (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen)
-	/// Storage: Staking ErasStartSessionIndex (r:0 w:1)
-	/// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen)
-	/// Storage: Staking MinimumActiveStake (r:0 w:1)
-	/// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:110 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:110 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:110 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:110 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:11 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
+	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumValidatorCount` (r:1 w:0)
+	/// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CurrentEra` (r:1 w:1)
+	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10)
+	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasStakersPaged` (r:0 w:10)
+	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::ErasStakersOverview` (r:0 w:10)
+	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasTotalStake` (r:0 w:1)
+	/// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1)
+	/// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	/// The range of component `v` is `[1, 10]`.
 	/// The range of component `n` is `[0, 100]`.
 	fn new_era(v: u32, n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0 + n * (720 ±0) + v * (3598 ±0)`
 		// Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 587_156_000 picoseconds.
-		Weight::from_parts(590_176_000, 512390)
-		// Standard Error: 2_008_420
-			.saturating_add(Weight::from_parts(64_526_052, 0).saturating_mul(v.into()))
-		// Standard Error: 200_128
-			.saturating_add(Weight::from_parts(18_070_222, 0).saturating_mul(n.into()))
+		// Minimum execution time: 571_337_000 picoseconds.
+		Weight::from_parts(578_857_000, 512390)
+		// Standard Error: 2_090_511
+			.saturating_add(Weight::from_parts(68_626_083, 0).saturating_mul(v.into()))
+		// Standard Error: 208_307
+			.saturating_add(Weight::from_parts(18_645_374, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(206_u64))
 			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
-			.saturating_add(T::DbWeight::get().writes(4_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into())))
 			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
 			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into()))
 	}
-	/// Storage: VoterList CounterForListNodes (r:1 w:0)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:200 w:0)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:2000 w:0)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:2000 w:0)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking Validators (r:1000 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:2000 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:2000 w:0)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking MinimumActiveStake (r:0 w:1)
-	/// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:2000 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:2000 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:2000 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:2000 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1000 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	/// The range of component `v` is `[500, 1000]`.
 	/// The range of component `n` is `[500, 1000]`.
 	fn get_npos_voters(v: u32, n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `3217 + n * (911 ±0) + v * (395 ±0)`
+		// Measured: `3175 + n * (911 ±0) + v * (395 ±0)`
 		// Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 34_399_721_000 picoseconds.
-		Weight::from_parts(34_605_803_000, 512390)
-		// Standard Error: 380_106
-			.saturating_add(Weight::from_parts(5_426_220, 0).saturating_mul(v.into()))
-		// Standard Error: 380_106
-			.saturating_add(Weight::from_parts(3_318_197, 0).saturating_mul(n.into()))
+		// Minimum execution time: 34_590_734_000 picoseconds.
+		Weight::from_parts(35_238_091_000, 512390)
+		// Standard Error: 427_974
+			.saturating_add(Weight::from_parts(5_084_196, 0).saturating_mul(v.into()))
+		// Standard Error: 427_974
+			.saturating_add(Weight::from_parts(4_503_420, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(201_u64))
 			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -684,709 +697,725 @@ impl WeightInfo for SubstrateWeight {
 			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
 			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into()))
 	}
-	/// Storage: Staking CounterForValidators (r:1 w:0)
-	/// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking Validators (r:1001 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1001 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// The range of component `v` is `[500, 1000]`.
 	fn get_npos_targets(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `983 + v * (50 ±0)`
+		// Measured: `979 + v * (50 ±0)`
 		// Estimated: `3510 + v * (2520 ±0)`
-		// Minimum execution time: 2_392_849_000 picoseconds.
-		Weight::from_parts(64_373_879, 3510)
-		// Standard Error: 8_995
-			.saturating_add(Weight::from_parts(4_721_536, 0).saturating_mul(v.into()))
+		// Minimum execution time: 2_509_588_000 picoseconds.
+		Weight::from_parts(89_050_539, 3510)
+		// Standard Error: 11_803
+			.saturating_add(Weight::from_parts(5_031_416, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into())))
 			.saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into()))
 	}
-	/// Storage: Staking MinCommission (r:0 w:1)
-	/// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking MinValidatorBond (r:0 w:1)
-	/// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
-	/// Storage: Staking MaxValidatorsCount (r:0 w:1)
-	/// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ChillThreshold (r:0 w:1)
-	/// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
-	/// Storage: Staking MaxNominatorsCount (r:0 w:1)
-	/// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking MinNominatorBond (r:0 w:1)
-	/// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
+	/// Storage: `Staking::MinCommission` (r:0 w:1)
+	/// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinValidatorBond` (r:0 w:1)
+	/// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MaxValidatorsCount` (r:0 w:1)
+	/// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ChillThreshold` (r:0 w:1)
+	/// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MaxNominatorsCount` (r:0 w:1)
+	/// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinNominatorBond` (r:0 w:1)
+	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn set_staking_configs_all_set() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 7_529_000 picoseconds.
-		Weight::from_parts(7_970_000, 0)
+		// Minimum execution time: 5_347_000 picoseconds.
+		Weight::from_parts(5_562_000, 0)
 			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
-	/// Storage: Staking MinCommission (r:0 w:1)
-	/// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking MinValidatorBond (r:0 w:1)
-	/// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
-	/// Storage: Staking MaxValidatorsCount (r:0 w:1)
-	/// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ChillThreshold (r:0 w:1)
-	/// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
-	/// Storage: Staking MaxNominatorsCount (r:0 w:1)
-	/// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking MinNominatorBond (r:0 w:1)
-	/// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
+	/// Storage: `Staking::MinCommission` (r:0 w:1)
+	/// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinValidatorBond` (r:0 w:1)
+	/// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MaxValidatorsCount` (r:0 w:1)
+	/// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ChillThreshold` (r:0 w:1)
+	/// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MaxNominatorsCount` (r:0 w:1)
+	/// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinNominatorBond` (r:0 w:1)
+	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn set_staking_configs_all_remove() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 7_011_000 picoseconds.
-		Weight::from_parts(7_317_000, 0)
+		// Minimum execution time: 4_725_000 picoseconds.
+		Weight::from_parts(5_075_000, 0)
 			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
-	/// Storage: Staking Ledger (r:1 w:0)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:1 w:1)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking ChillThreshold (r:1 w:0)
-	/// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
-	/// Storage: Staking MaxNominatorsCount (r:1 w:0)
-	/// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking CounterForNominators (r:1 w:1)
-	/// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking MinNominatorBond (r:1 w:0)
-	/// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
-	/// Storage: Staking Validators (r:1 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:2 w:2)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:1 w:1)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList CounterForListNodes (r:1 w:1)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:1 w:1)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ChillThreshold` (r:1 w:0)
+	/// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MaxNominatorsCount` (r:1 w:0)
+	/// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForNominators` (r:1 w:1)
+	/// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinNominatorBond` (r:1 w:0)
+	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:2 w:2)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:1 w:1)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:1)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill_other() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `1871`
+		// Measured: `1773`
 		// Estimated: `6248`
-		// Minimum execution time: 75_982_000 picoseconds.
-		Weight::from_parts(77_412_000, 6248)
+		// Minimum execution time: 67_204_000 picoseconds.
+		Weight::from_parts(69_197_000, 6248)
 			.saturating_add(T::DbWeight::get().reads(11_u64))
 			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
-	/// Storage: Staking MinCommission (r:1 w:0)
-	/// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking Validators (r:1 w:1)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
+	/// Storage: `Staking::MinCommission` (r:1 w:0)
+	/// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1 w:1)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	fn force_apply_min_commission() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `694`
+		// Measured: `691`
 		// Estimated: `3510`
-		// Minimum execution time: 13_923_000 picoseconds.
-		Weight::from_parts(14_356_000, 3510)
+		// Minimum execution time: 12_497_000 picoseconds.
+		Weight::from_parts(12_943_000, 3510)
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
-	/// Storage: Staking MinCommission (r:0 w:1)
-	/// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: `Staking::MinCommission` (r:0 w:1)
+	/// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn set_min_commission() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 3_415_000 picoseconds.
-		Weight::from_parts(3_679_000, 0)
+		// Minimum execution time: 3_245_000 picoseconds.
+		Weight::from_parts(3_352_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
 }
 
-// For backwards compatibility and tests
+// For backwards compatibility and tests.
 impl WeightInfo for () {
-	/// Storage: Staking Bonded (r:1 w:1)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:0 w:1)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
+	/// Storage: `Staking::Bonded` (r:1 w:1)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:0 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:0 w:1)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn bond() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `1047`
+		// Measured: `927`
 		// Estimated: `4764`
-		// Minimum execution time: 53_983_000 picoseconds.
-		Weight::from_parts(55_296_000, 4764)
-			.saturating_add(RocksDbWeight::get().reads(5_u64))
+		// Minimum execution time: 42_811_000 picoseconds.
+		Weight::from_parts(44_465_000, 4764)
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
 			.saturating_add(RocksDbWeight::get().writes(4_u64))
 	}
-	/// Storage: Staking Bonded (r:1 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:3 w:3)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:2 w:2)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:3 w:3)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:2 w:2)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn bond_extra() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `2028`
+		// Measured: `1990`
 		// Estimated: `8877`
-		// Minimum execution time: 96_590_000 picoseconds.
-		Weight::from_parts(98_921_000, 8877)
+		// Minimum execution time: 87_628_000 picoseconds.
+		Weight::from_parts(90_020_000, 8877)
 			.saturating_add(RocksDbWeight::get().reads(9_u64))
 			.saturating_add(RocksDbWeight::get().writes(7_u64))
 	}
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:1 w:0)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking MinNominatorBond (r:1 w:0)
-	/// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:3 w:3)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:1 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:2 w:2)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:1 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinNominatorBond` (r:1 w:0)
+	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CurrentEra` (r:1 w:0)
+	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:3 w:3)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:2 w:2)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn unbond() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `2233`
+		// Measured: `2195`
 		// Estimated: `8877`
-		// Minimum execution time: 99_901_000 picoseconds.
-		Weight::from_parts(102_919_000, 8877)
+		// Minimum execution time: 91_655_000 picoseconds.
+		Weight::from_parts(94_146_000, 8877)
 			.saturating_add(RocksDbWeight::get().reads(12_u64))
 			.saturating_add(RocksDbWeight::get().writes(7_u64))
 	}
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CurrentEra` (r:1 w:0)
+	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_update(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `1021`
+		// Measured: `1115`
 		// Estimated: `4764`
-		// Minimum execution time: 45_230_000 picoseconds.
-		Weight::from_parts(47_052_829, 4764)
-		// Standard Error: 1_044
-			.saturating_add(Weight::from_parts(43_887, 0).saturating_mul(s.into()))
-			.saturating_add(RocksDbWeight::get().reads(4_u64))
+		// Minimum execution time: 42_953_000 picoseconds.
+		Weight::from_parts(44_648_505, 4764)
+		// Standard Error: 937
+			.saturating_add(Weight::from_parts(51_090, 0).saturating_mul(s.into()))
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
-	/// Storage: Staking Ledger (r:1 w:1)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking CurrentEra (r:1 w:0)
-	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:1 w:1)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: Staking SlashingSpans (r:1 w:1)
-	/// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Staking Validators (r:1 w:0)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:1 w:1)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking CounterForNominators (r:1 w:1)
-	/// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:2 w:2)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:1 w:1)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList CounterForListNodes (r:1 w:1)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Balances Locks (r:1 w:1)
-	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
-	/// Storage: Balances Freezes (r:1 w:0)
-	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
-	/// Storage: Staking Payee (r:0 w:1)
-	/// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: Staking SpanSlash (r:0 w:100)
-	/// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:1)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CurrentEra` (r:1 w:0)
+	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
+	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Staking::Bonded` (r:1 w:1)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Freezes` (r:1 w:0)
+	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:1 w:1)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForNominators` (r:1 w:1)
+	/// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:2 w:2)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:1 w:1)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:1)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Payee` (r:0 w:1)
+	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::SpanSlash` (r:0 w:100)
+	/// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_kill(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `2294 + s * (4 ±0)`
+		// Measured: `2196 + s * (4 ±0)`
 		// Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 97_534_000 picoseconds.
-		Weight::from_parts(104_772_163, 6248)
-		// Standard Error: 3_674
-			.saturating_add(Weight::from_parts(1_470_124, 0).saturating_mul(s.into()))
+		// Minimum execution time: 89_218_000 picoseconds.
+		Weight::from_parts(97_761_884, 6248)
+		// Standard Error: 3_888
+			.saturating_add(Weight::from_parts(1_346_441, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(13_u64))
 			.saturating_add(RocksDbWeight::get().writes(11_u64))
 			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
-	/// Storage: Staking Ledger (r:1 w:0)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking MinValidatorBond (r:1 w:0)
-	/// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
-	/// Storage: Staking MinCommission (r:1 w:0)
-	/// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking Validators (r:1 w:1)
-	/// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen)
-	/// Storage: Staking MaxValidatorsCount (r:1 w:0)
-	/// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:1 w:0)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
-	/// Storage: Staking Bonded (r:1 w:0)
-	/// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen)
-	/// Storage: VoterList ListNodes (r:1 w:1)
-	/// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen)
-	/// Storage: VoterList ListBags (r:1 w:1)
-	/// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen)
-	/// Storage: VoterList CounterForListNodes (r:1 w:1)
-	/// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking CounterForValidators (r:1 w:1)
-	/// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinValidatorBond` (r:1 w:0)
+	/// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinCommission` (r:1 w:0)
+	/// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1 w:1)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MaxValidatorsCount` (r:1 w:0)
+	/// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:1 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:1 w:1)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:1 w:1)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:1)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:1)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn validate() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `1414`
+		// Measured: `1372`
 		// Estimated: `4556`
-		// Minimum execution time: 57_467_000 picoseconds.
-		Weight::from_parts(59_437_000, 4556)
+		// Minimum execution time: 51_200_000 picoseconds.
+		Weight::from_parts(53_403_000, 4556)
 			.saturating_add(RocksDbWeight::get().reads(11_u64))
 			.saturating_add(RocksDbWeight::get().writes(5_u64))
 	}
-	/// Storage: Staking Ledger (r:1 w:0)
-	/// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen)
-	/// Storage: Staking Nominators (r:128 w:128)
-	/// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen)
+	/// Storage: `Staking::Ledger` (r:1 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:128 w:128)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
 	/// The range of component `k` is `[1, 128]`.
 	fn kick(k: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `1285 + k * (569 ±0)`
+		// Measured: `1280 + k * (569 ±0)`
 		// Estimated: `4556 + k * (3033 ±0)`
-		// Minimum execution time: 32_857_000 picoseconds.
-		Weight::from_parts(37_116_967, 4556)
-		// Standard Error: 9_522
-			.saturating_add(Weight::from_parts(8_796_167, 0).saturating_mul(k.into()))
+		// Minimum execution time: 28_963_000 picoseconds.
+ Weight::from_parts(29_884_371, 4556) + // Standard Error: 9_063 + .saturating_add(Weight::from_parts(6_532_967, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:17 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: 
Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1908 + n * (102 ±0)` + // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 69_613_000 picoseconds. - Weight::from_parts(68_079_061, 6248) - // Standard Error: 18_554 - .saturating_add(Weight::from_parts(4_012_761, 0).saturating_mul(n.into())) + // Minimum execution time: 64_644_000 picoseconds. + Weight::from_parts(62_855_016, 6248) + // Standard Error: 17_528 + .saturating_add(Weight::from_parts(3_993_850, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> 
Weight { // Proof Size summary in bytes: - // Measured: `1748` + // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 60_430_000 picoseconds. - Weight::from_parts(62_702_000, 6248) + // Minimum execution time: 54_505_000 picoseconds. + Weight::from_parts(56_026_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn set_payee() -> Weight { // Proof Size summary in bytes: - // Measured: `808` + // Measured: `902` // Estimated: `4556` - // Minimum execution time: 14_276_000 picoseconds. - Weight::from_parts(14_766_000, 4556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 16_639_000 picoseconds. + Weight::from_parts(17_342_000, 4556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:2) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: - // Measured: `907` - // Estimated: `8122` - // Minimum execution time: 21_710_000 picoseconds. - Weight::from_parts(22_430_000, 8122) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `902` + // Estimated: `4556` + // Minimum execution time: 20_334_000 picoseconds. + Weight::from_parts(21_067_000, 4556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Staking ValidatorCount (r:0 w:1) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_validator_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_970_000 picoseconds. - Weight::from_parts(3_120_000, 0) + // Minimum execution time: 2_680_000 picoseconds. 
+ Weight::from_parts(2_774_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_no_eras() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_362_000 picoseconds. - Weight::from_parts(9_785_000, 0) + // Minimum execution time: 8_613_000 picoseconds. + Weight::from_parts(8_922_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_275_000 picoseconds. - Weight::from_parts(9_678_000, 0) + // Minimum execution time: 8_657_000 picoseconds. + Weight::from_parts(9_020_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking ForceEra (r:0 w:1) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) fn force_new_era_always() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_414_000 picoseconds. - Weight::from_parts(9_848_000, 0) + // Minimum execution time: 8_600_000 picoseconds. + Weight::from_parts(9_157_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking Invulnerables (r:0 w:1) - /// Proof Skipped: Staking Invulnerables (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_061_000 picoseconds. - Weight::from_parts(3_618_535, 0) - // Standard Error: 44 - .saturating_add(Weight::from_parts(10_774, 0).saturating_mul(v.into())) + // Minimum execution time: 2_792_000 picoseconds. 
+ Weight::from_parts(3_293_694, 0) + // Standard Error: 31 + .saturating_add(Weight::from_parts(10_668, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2018 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_914_000 picoseconds. - Weight::from_parts(95_688_129, 6248) - // Standard Error: 5_030 - .saturating_add(Weight::from_parts(1_487_249, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Minimum execution time: 86_537_000 picoseconds. + Weight::from_parts(95_127_637, 6248) + // Standard Error: 3_902 + .saturating_add(Weight::from_parts(1_336_182, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: Staking UnappliedSlashes (r:1 w:1) - /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `66639` - // Estimated: `70104` - // Minimum execution time: 99_269_000 picoseconds. - Weight::from_parts(1_154_264_637, 70104) - // Standard Error: 76_592 - .saturating_add(Weight::from_parts(6_490_888, 0).saturating_mul(s.into())) + // Measured: `66672` + // Estimated: `70137` + // Minimum execution time: 100_777_000 picoseconds. 
+ Weight::from_parts(896_540_406, 70137) + // Standard Error: 57_788 + .saturating_add(Weight::from_parts(4_870_910, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:257 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:257 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:257 w:257) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:257 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:258 w:258) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::Payee` (r:257 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `20217 + n * (143 ±0)` - // Estimated: `19844 + n * (2603 ±1)` - // Minimum execution time: 91_767_000 picoseconds. - Weight::from_parts(146_781_264, 19844) - // Standard Error: 31_341 - .saturating_add(Weight::from_parts(30_553_008, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `21644 + n * (155 ±0)` + // Estimated: `21412 + n * (2603 ±0)` + // Minimum execution time: 133_129_000 picoseconds. + Weight::from_parts(190_983_630, 21412) + // Standard Error: 17_497 + .saturating_add(Weight::from_parts(24_723_153, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) } - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasValidatorReward (r:1 w:0) - /// Proof: Staking ErasValidatorReward (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:257 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:257 w:257) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:1 w:0) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasRewardPoints (r:1 w:0) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:1 w:0) - /// Proof: Staking ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:257 w:0) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: System Account (r:257 w:257) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:257 w:257) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:257 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:257 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:257 w:257) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersClipped` (r:1 w:0) + /// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::ClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:257 w:257) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:257 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:257 w:257) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:257 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33190 + n * (377 ±0)` - // Estimated: `30845 + n * (3774 ±0)` - // Minimum execution time: 121_303_000 picoseconds. - Weight::from_parts(151_046_907, 30845) - // Standard Error: 41_899 - .saturating_add(Weight::from_parts(49_837_804, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `33297 + n * (377 ±0)` + // Estimated: `30944 + n * (3774 ±3)` + // Minimum execution time: 149_773_000 picoseconds. 
+ Weight::from_parts(151_527_124, 30944) + // Standard Error: 24_152 + .saturating_add(Weight::from_parts(46_124_074, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into())) } - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2029 + l * (7 ±0)` + // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 90_068_000 picoseconds. - Weight::from_parts(93_137_456, 8877) - // Standard Error: 4_799 - .saturating_add(Weight::from_parts(54_421, 0).saturating_mul(l.into())) + // Minimum execution time: 81_618_000 picoseconds. 
+ Weight::from_parts(85_245_630, 8877) + // Standard Error: 5_049 + .saturating_add(Weight::from_parts(39_811, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Staking Bonded (r:1 w:1) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:1 w:1) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:1) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Staking SpanSlash (r:0 w:100) - /// Proof: Staking SpanSlash (max_values: None, max_size: Some(76), added: 2551, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` 
(`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2294 + s * (4 ±0)` + // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 103_139_000 picoseconds. - Weight::from_parts(107_036_296, 6248) - // Standard Error: 3_935 - .saturating_add(Weight::from_parts(1_465_860, 0).saturating_mul(s.into())) + // Minimum execution time: 95_395_000 picoseconds. + Weight::from_parts(100_459_234, 6248) + // Standard Error: 3_781 + .saturating_add(Weight::from_parts(1_333_607, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:200 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:110 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:110 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:11 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:110 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:110 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinimumValidatorCount (r:1 w:0) - /// Proof: Staking MinimumValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:1) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakersClipped (r:0 w:10) - /// Proof Skipped: Staking ErasStakersClipped (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasValidatorPrefs (r:0 w:10) - /// Proof: Staking 
ErasValidatorPrefs (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:0 w:10) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking ErasTotalStake (r:0 w:1) - /// Proof: Staking ErasTotalStake (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:0 w:1) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:10) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: 
Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 587_156_000 picoseconds. - Weight::from_parts(590_176_000, 512390) - // Standard Error: 2_008_420 - .saturating_add(Weight::from_parts(64_526_052, 0).saturating_mul(v.into())) - // Standard Error: 200_128 - .saturating_add(Weight::from_parts(18_070_222, 0).saturating_mul(n.into())) + // Minimum execution time: 571_337_000 picoseconds. + Weight::from_parts(578_857_000, 512390) + // Standard Error: 2_090_511 + .saturating_add(Weight::from_parts(68_626_083, 0).saturating_mul(v.into())) + // Standard Error: 208_307 + .saturating_add(Weight::from_parts(18_645_374, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: VoterList CounterForListNodes (r:1 w:0) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:200 w:0) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2000 w:0) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:2000 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1000 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2000 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2000 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking MinimumActiveStake (r:0 w:1) - /// Proof: Staking MinimumActiveStake (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2000 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2000 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2000 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:2000 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1000 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3217 + n * (911 ±0) + v * (395 ±0)` + // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_399_721_000 picoseconds. - Weight::from_parts(34_605_803_000, 512390) - // Standard Error: 380_106 - .saturating_add(Weight::from_parts(5_426_220, 0).saturating_mul(v.into())) - // Standard Error: 380_106 - .saturating_add(Weight::from_parts(3_318_197, 0).saturating_mul(n.into())) + // Minimum execution time: 34_590_734_000 picoseconds. + Weight::from_parts(35_238_091_000, 512390) + // Standard Error: 427_974 + .saturating_add(Weight::from_parts(5_084_196, 0).saturating_mul(v.into())) + // Standard Error: 427_974 + .saturating_add(Weight::from_parts(4_503_420, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1394,113 +1423,113 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: Staking CounterForValidators (r:1 w:0) - /// Proof: Staking CounterForValidators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1001 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `983 + v * (50 ±0)` + // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_392_849_000 picoseconds. - Weight::from_parts(64_373_879, 3510) - // Standard Error: 8_995 - .saturating_add(Weight::from_parts(4_721_536, 0).saturating_mul(v.into())) + // Minimum execution time: 2_509_588_000 picoseconds. 
+ Weight::from_parts(89_050_539, 3510) + // Standard Error: 11_803 + .saturating_add(Weight::from_parts(5_031_416, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_set() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_529_000 picoseconds. - Weight::from_parts(7_970_000, 0) + // Minimum execution time: 5_347_000 picoseconds. 
+ Weight::from_parts(5_562_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinValidatorBond (r:0 w:1) - /// Proof: Staking MinValidatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking MaxValidatorsCount (r:0 w:1) - /// Proof: Staking MaxValidatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:0 w:1) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:0 w:1) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:0 w:1) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_staking_configs_all_remove() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_011_000 picoseconds. - Weight::from_parts(7_317_000, 0) + // Minimum execution time: 4_725_000 picoseconds. 
+ Weight::from_parts(5_075_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking ChillThreshold (r:1 w:0) - /// Proof: Staking ChillThreshold (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Staking MaxNominatorsCount (r:1 w:0) - /// Proof: Staking MaxNominatorsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking MinNominatorBond (r:1 w:0) - /// Proof: Staking MinNominatorBond (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:2 w:2) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1871` + // Measured: `1773` // Estimated: `6248` - // Minimum execution time: 75_982_000 picoseconds. 
- Weight::from_parts(77_412_000, 6248) + // Minimum execution time: 67_204_000 picoseconds. + Weight::from_parts(69_197_000, 6248) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Staking MinCommission (r:1 w:0) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:1) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) fn force_apply_min_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `694` + // Measured: `691` // Estimated: `3510` - // Minimum execution time: 13_923_000 picoseconds. - Weight::from_parts(14_356_000, 3510) + // Minimum execution time: 12_497_000 picoseconds. + Weight::from_parts(12_943_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Staking MinCommission (r:0 w:1) - /// Proof: Staking MinCommission (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_min_commission() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_415_000 picoseconds. - Weight::from_parts(3_679_000, 0) + // Minimum execution time: 3_245_000 picoseconds. + Weight::from_parts(3_352_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index dfc18987d152..c2ac5ae004b1 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -21,11 +21,13 @@ //! approaches in general. Definitions related to sessions, slashing, etc go here. use crate::currency_to_vote::CurrencyToVote; -use codec::{FullCodec, MaxEncodedLen}; +use codec::{Decode, Encode, FullCodec, HasCompact, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_core::RuntimeDebug; -use sp_runtime::{DispatchError, DispatchResult, Saturating}; -use sp_std::{collections::btree_map::BTreeMap, ops::Sub, vec::Vec}; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Zero}, + DispatchError, DispatchResult, RuntimeDebug, Saturating, +}; +use sp_std::{collections::btree_map::BTreeMap, ops::Sub, vec, vec::Vec}; pub mod offence; @@ -37,6 +39,8 @@ pub type SessionIndex = u32; /// Counter for the number of eras that have passed. pub type EraIndex = u32; +/// Type for identifying a page. +pub type Page = u32; /// Representation of a staking account, which may be a stash or controller account. 
 ///
 /// Note: once the controller is completely deprecated, this enum can also be deprecated in favor of
@@ -280,6 +284,9 @@ pub trait StakingInterface {
 		}
 	}
 
+	#[cfg(feature = "runtime-benchmarks")]
+	fn max_exposure_page_size() -> Page;
+
 	#[cfg(feature = "runtime-benchmarks")]
 	fn add_era_stakers(
 		current_era: &EraIndex,
@@ -291,4 +298,122 @@ pub trait StakingInterface {
 	fn set_current_era(era: EraIndex);
 }
 
+/// The amount of exposure for an era that an individual nominator has (susceptible to slashing).
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
+pub struct IndividualExposure<AccountId, Balance: HasCompact> {
+	/// The stash account of the nominator in question.
+	pub who: AccountId,
+	/// Amount of funds exposed.
+	#[codec(compact)]
+	pub value: Balance,
+}
+
+/// A snapshot of the stake backing a single validator in the system.
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
+pub struct Exposure<AccountId, Balance: HasCompact> {
+	/// The total balance backing this validator.
+	#[codec(compact)]
+	pub total: Balance,
+	/// The validator's own stash that is exposed.
+	#[codec(compact)]
+	pub own: Balance,
+	/// The portions of nominators stashes that are exposed.
+	pub others: Vec<IndividualExposure<AccountId, Balance>>,
+}
+
+impl<AccountId, Balance: Default + HasCompact> Default for Exposure<AccountId, Balance> {
+	fn default() -> Self {
+		Self { total: Default::default(), own: Default::default(), others: vec![] }
+	}
+}
+
+impl<
+		AccountId: Clone,
+		Balance: HasCompact + AtLeast32BitUnsigned + Copy + codec::MaxEncodedLen,
+	> Exposure<AccountId, Balance>
+{
+	/// Splits an `Exposure` into `PagedExposureMetadata` and multiple chunks of
+	/// `IndividualExposure` with each chunk having maximum of `page_size` elements.
+	pub fn into_pages(
+		self,
+		page_size: Page,
+	) -> (PagedExposureMetadata<Balance>, Vec<ExposurePage<AccountId, Balance>>) {
+		let individual_chunks = self.others.chunks(page_size as usize);
+		let mut exposure_pages: Vec<ExposurePage<AccountId, Balance>> =
+			Vec::with_capacity(individual_chunks.len());
+
+		for chunk in individual_chunks {
+			let mut page_total: Balance = Zero::zero();
+			let mut others: Vec<IndividualExposure<AccountId, Balance>> =
+				Vec::with_capacity(chunk.len());
+			for individual in chunk.iter() {
+				page_total.saturating_accrue(individual.value);
+				others.push(IndividualExposure {
+					who: individual.who.clone(),
+					value: individual.value,
+				})
+			}
+
+			exposure_pages.push(ExposurePage { page_total, others });
+		}
+
+		(
+			PagedExposureMetadata {
+				total: self.total,
+				own: self.own,
+				nominator_count: self.others.len() as u32,
+				page_count: exposure_pages.len() as Page,
+			},
+			exposure_pages,
+		)
+	}
+}
+
+/// A snapshot of the stake backing a single validator in the system.
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
+pub struct ExposurePage<AccountId, Balance: HasCompact> {
+	/// The total balance of this chunk/page.
+	#[codec(compact)]
+	pub page_total: Balance,
+	/// The portions of nominators stashes that are exposed.
+	pub others: Vec<IndividualExposure<AccountId, Balance>>,
+}
+
+impl<A, B: Default + HasCompact> Default for ExposurePage<A, B> {
+	fn default() -> Self {
+		ExposurePage { page_total: Default::default(), others: vec![] }
+	}
+}
+
+/// Metadata for Paged Exposure of a validator such as total stake across pages and page count.
+///
+/// In combination with the associated `ExposurePage`s, it can be used to reconstruct a full
+/// `Exposure` set of a validator. This is useful for cases where we want to query full set of
+/// `Exposure` as one page (for backward compatibility).
+#[derive(
+	PartialEq,
+	Eq,
+	PartialOrd,
+	Ord,
+	Clone,
+	Encode,
+	Decode,
+	RuntimeDebug,
+	TypeInfo,
+	Default,
+	MaxEncodedLen,
+)]
+pub struct PagedExposureMetadata<Balance: HasCompact + codec::MaxEncodedLen> {
+	/// The total balance backing this validator.
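+	/// This is the sum of the validator's own stake and all the individual pages
+	/// combined, i.e. it mirrors `Exposure::total` rather than a single page's total.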
+ #[codec(compact)] + pub total: Balance, + /// The validator's own stash that is exposed. + #[codec(compact)] + pub own: Balance, + /// Number of nominators backing this validator. + pub nominator_count: u32, + /// Number of pages of nominators. + pub page_count: Page, +} + sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); From 2726d5af65d041adbcd3e96986a233e7c8fa07a8 Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Wed, 1 Nov 2023 11:58:46 -0300 Subject: [PATCH 58/69] remove gum dependency on jaeger (#2106) Co-authored-by: Marcin S --- Cargo.lock | 12 ------------ .../node/core/pvf/execute-worker/Cargo.toml | 6 ------ .../node/core/pvf/prepare-worker/Cargo.toml | 6 ------ polkadot/node/gum/Cargo.toml | 3 +-- polkadot/node/gum/src/lib.rs | 19 +++++++++++++++++-- 5 files changed, 18 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb49533a7f94..e6d764dcd396 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12257,15 +12257,10 @@ name = "polkadot-node-core-pvf-execute-worker" version = "1.0.0" dependencies = [ "cpu-time", - "futures", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", "polkadot-primitives", - "rayon", - "sp-core", - "sp-maybe-compressed-blob", - "sp-tracing", "tracing-gum", ] @@ -12274,19 +12269,13 @@ name = "polkadot-node-core-pvf-prepare-worker" version = "1.0.0" dependencies = [ "cfg-if", - "futures", "libc", "parity-scale-codec", "polkadot-node-core-pvf-common", - "polkadot-parachain-primitives", "polkadot-primitives", "rayon", - "sc-executor", "sc-executor-common", "sc-executor-wasmtime", - "sp-io", - "sp-maybe-compressed-blob", - "sp-tracing", "tikv-jemalloc-ctl", "tracing-gum", ] @@ -19103,7 +19092,6 @@ name = "tracing-gum" version = "1.0.0" dependencies = [ "coarsetime", - "polkadot-node-jaeger", "polkadot-primitives", "tracing", "tracing-gum-proc-macro", diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 203bbd0e7859..77a9420961c0 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -8,9 +8,7 @@ license.workspace = true [dependencies] cpu-time = "1.0.0" -futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } -rayon = "1.5.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -18,9 +16,5 @@ polkadot-node-core-pvf-common = { path = "../common" } polkadot-parachain-primitives = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../../../substrate/primitives/maybe-compressed-blob" } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } - [features] builder = [] diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index eb53ebdc941b..e5a08f8a153d 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -8,7 +8,6 @@ license.workspace = true [dependencies] cfg-if = "1.0" -futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" rayon = "1.5.1" @@ -17,15 +16,10 @@ tikv-jemalloc-ctl = { version = "0.5.0", optional = true } parity-scale-codec = { version = "3.6.1", default-features = false, features = 
["derive"] } polkadot-node-core-pvf-common = { path = "../common" } -polkadot-parachain-primitives = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } -sc-executor = { path = "../../../../../substrate/client/executor" } sc-executor-common = { path = "../../../../../substrate/client/executor/common" } sc-executor-wasmtime = { path = "../../../../../substrate/client/executor/wasmtime" } -sp-io = { path = "../../../../../substrate/primitives/io" } -sp-maybe-compressed-blob = { path = "../../../../../substrate/primitives/maybe-compressed-blob" } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemalloc-ctl = "0.5.0" diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 01ed34f7a730..acee9efd0e09 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -9,6 +9,5 @@ description = "Stick logs together with the TraceID as provided by tempo" [dependencies] coarsetime = "0.1.22" tracing = "0.1.35" -jaeger = { package = "polkadot-node-jaeger" , path = "../jaeger" } -gum-proc-macro = { package = "tracing-gum-proc-macro" , path = "proc-macro" } +gum-proc-macro = { package = "tracing-gum-proc-macro", path = "proc-macro" } polkadot-primitives = { path = "../../primitives", features = ["std"] } diff --git a/polkadot/node/gum/src/lib.rs b/polkadot/node/gum/src/lib.rs index 1cc4d8dec1cb..dad5887af224 100644 --- a/polkadot/node/gum/src/lib.rs +++ b/polkadot/node/gum/src/lib.rs @@ -105,8 +105,23 @@ pub use tracing::{enabled, event, Level}; -#[doc(hidden)] -pub use jaeger::hash_to_trace_identifier; +// jaeger dependency + +/// Alias for the 16 byte unique identifier used with jaeger. +pub(crate) type TraceIdentifier = u128; + +/// A helper to convert the hash to the fixed size representation +/// needed for jaeger. +#[inline] +pub fn hash_to_trace_identifier(hash: Hash) -> TraceIdentifier { + let mut buf = [0u8; 16]; + buf.copy_from_slice(&hash.as_ref()[0..16]); + // The slice bytes are copied in reading order, so if interpreted + // in string form by a human, that means lower indices have higher + // values and hence corresponds to BIG endian ordering of the individual + // bytes. + u128::from_be_bytes(buf) as TraceIdentifier +} #[doc(hidden)] pub use polkadot_primitives::{CandidateHash, Hash}; From 8507f45cefe5185712938a0087fbbdf4f0c2cdfc Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 1 Nov 2023 16:06:25 +0100 Subject: [PATCH 59/69] [ci] Revert CI_IMAGE variable (#2120) CI image has been updated in the shared snippet, reverting the variable back. 
--- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6dc7fc1a3cdb..835b668de259 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,8 +21,7 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - # CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231025" + CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" From dce5a8da66af8d5d2bbe639a2e39cfdd9d712af5 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Wed, 1 Nov 2023 17:11:07 +0200 Subject: [PATCH 60/69] Direct XCM `ExportMessage` fees for different bridges to different receiver accounts (#2021) --- .../assets/asset-hub-rococo/src/xcm_config.rs | 9 +- .../assets/asset-hub-rococo/tests/tests.rs | 4 +- .../asset-hub-westend/src/xcm_config.rs | 9 +- .../bridge-hub-rococo/src/xcm_config.rs | 147 ++++++++++++++++-- .../contracts-rococo/src/xcm_config.rs | 9 +- polkadot/runtime/rococo/src/xcm_config.rs | 10 +- polkadot/runtime/westend/src/xcm_config.rs | 9 +- .../src/generic/benchmarking.rs | 2 +- polkadot/xcm/pallet-xcm/src/mock.rs | 8 +- polkadot/xcm/xcm-builder/src/fee_handling.rs | 117 ++++++++++---- polkadot/xcm/xcm-builder/src/lib.rs | 4 +- polkadot/xcm/xcm-builder/src/tests/mock.rs | 3 +- polkadot/xcm/xcm-executor/src/lib.rs | 6 +- .../xcm-executor/src/traits/fee_manager.rs | 9 +- 14 files changed, 275 insertions(+), 71 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 222e96d59d17..83fa31abd7f5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -53,7 +53,7 @@ use xcm_builder::{ SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeesToAccount, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; @@ -75,7 +75,7 @@ parameter_types! 
{ PalletInstance(::index() as u8).into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); - pub TreasuryAccount: Option = Some(TREASURY_PALLET_ID.into_account_truncating()); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } @@ -619,7 +619,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = XcmFeesToAccount; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = (bridging::to_wococo::UniversalAliases, bridging::to_rococo::UniversalAliases); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index b93315cc39d8..18f5f4fc41ec 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -683,7 +683,7 @@ mod asset_hub_rococo_tests { bridging_to_asset_hub_wococo, WeightLimit::Unlimited, Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), - Some(xcm_config::TreasuryAccount::get().unwrap()), + Some(xcm_config::TreasuryAccount::get()), ) } @@ -871,7 +871,7 @@ mod asset_hub_wococo_tests { with_wococo_flavor_bridging_to_asset_hub_rococo, WeightLimit::Unlimited, Some(xcm_config::bridging::XcmBridgeHubRouterFeeAssetId::get()), - Some(xcm_config::TreasuryAccount::get().unwrap()), + Some(xcm_config::TreasuryAccount::get()), ) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index fe0fd613d220..6b5ce904da9a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -52,7 +52,7 @@ use xcm_builder::{ SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeesToAccount, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; @@ -73,7 +73,7 @@ parameter_types! 
{ pub PoolAssetsPalletLocation: MultiLocation = PalletInstance(::index() as u8).into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); - pub TreasuryAccount: Option = Some(TREASURY_PALLET_ID.into_account_truncating()); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); } @@ -562,7 +562,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = XcmFeesToAccount; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 2456a7ee63a8..dc6f61d01465 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -24,9 +24,18 @@ use crate::{ BridgeGrandpaRococoInstance, BridgeGrandpaWococoInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, }, - bridge_hub_rococo_config::ToBridgeHubWococoHaulBlobExporter, - bridge_hub_wococo_config::ToBridgeHubRococoHaulBlobExporter, + bridge_hub_rococo_config::{ + AssetHubRococoParaId, BridgeHubWococoChainId, BridgeHubWococoMessagesLane, + ToBridgeHubWococoHaulBlobExporter, WococoGlobalConsensusNetwork, + }, + bridge_hub_wococo_config::{ + AssetHubWococoParaId, BridgeHubRococoChainId, BridgeHubRococoMessagesLane, + RococoGlobalConsensusNetwork, ToBridgeHubRococoHaulBlobExporter, + }, }; +use bp_messages::LaneId; +use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; +use bp_runtime::ChainId; use frame_support::{ match_types, parameter_types, traits::{ConstU32, Contains, Equals, Everything, Nothing}, @@ -43,18 +52,20 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use rococo_runtime_constants::system_parachain; use sp_core::Get; use sp_runtime::traits::AccountIdConversion; +use sp_std::marker::PhantomData; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeesToAccount, + deposit_or_burn_fee, AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, HandleFee, + IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use 
xcm_executor::{ - traits::{ExportXcm, WithOriginFilter}, + traits::{ExportXcm, FeeReason, TransactAsset, WithOriginFilter}, XcmExecutor, }; @@ -66,7 +77,7 @@ parameter_types! { X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; - pub TreasuryAccount: Option = Some(TREASURY_PALLET_ID.into_account_truncating()); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } @@ -290,7 +301,26 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = PolkadotXcm; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = XcmFeesToAccount; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + ( + XcmExportFeeToRelayerRewardAccounts< + Self::AssetTransactor, + WococoGlobalConsensusNetwork, + AssetHubWococoParaId, + BridgeHubWococoChainId, + BridgeHubWococoMessagesLane, + >, + XcmExportFeeToRelayerRewardAccounts< + Self::AssetTransactor, + RococoGlobalConsensusNetwork, + AssetHubRococoParaId, + BridgeHubRococoChainId, + BridgeHubRococoMessagesLane, + >, + XcmFeeToAccount, + ), + >; type MessageExporter = BridgeHubRococoOrBridgeHubWococoSwitchExporter; type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; @@ -401,3 +431,96 @@ impl ExportXcm for BridgeHubRococoOrBridgeHubWococoSwitchExporter { } } } + +/// A `HandleFee` implementation that simply deposits the fees for `ExportMessage` XCM instructions +/// into the accounts that are used for paying the relayer rewards. +/// Burns the fees in case of a failure. +pub struct XcmExportFeeToRelayerRewardAccounts< + AssetTransactor, + DestNetwork, + DestParaId, + DestBridgeHubId, + BridgeLaneId, +>(PhantomData<(AssetTransactor, DestNetwork, DestParaId, DestBridgeHubId, BridgeLaneId)>); + +impl< + AssetTransactor: TransactAsset, + DestNetwork: Get, + DestParaId: Get, + DestBridgeHubId: Get, + BridgeLaneId: Get, + > HandleFee + for XcmExportFeeToRelayerRewardAccounts< + AssetTransactor, + DestNetwork, + DestParaId, + DestBridgeHubId, + BridgeLaneId, + > +{ + fn handle_fee( + fee: MultiAssets, + maybe_context: Option<&XcmContext>, + reason: FeeReason, + ) -> MultiAssets { + if matches!(reason, FeeReason::Export { network: bridged_network, destination } + if bridged_network == DestNetwork::get() && + destination == X1(Parachain(DestParaId::get().into()))) + { + // We have 2 relayer rewards accounts: + // - the SA of the source parachain on this BH: this pays the relayers for delivering + // Source para -> Target Para message delivery confirmations + // - the SA of the destination parachain on this BH: this pays the relayers for + // delivering Target para -> Source Para messages + // We split the `ExportMessage` fee between these 2 accounts. 
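+			// The split itself uses integer division: the source side gets
+			// `total_fee / 2` and the destination side gets the remainder, so an
+			// odd unit ends up with the destination account.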
+ let source_para_account = PayRewardFromAccount::< + pallet_balances::Pallet, + AccountId, + >::rewards_account(RewardsAccountParams::new( + BridgeLaneId::get(), + DestBridgeHubId::get(), + RewardsAccountOwner::ThisChain, + )); + + let dest_para_account = PayRewardFromAccount::< + pallet_balances::Pallet, + AccountId, + >::rewards_account(RewardsAccountParams::new( + BridgeLaneId::get(), + DestBridgeHubId::get(), + RewardsAccountOwner::BridgedChain, + )); + + for asset in fee.into_inner() { + match asset.fun { + Fungible(total_fee) => { + let source_fee = total_fee / 2; + deposit_or_burn_fee::( + MultiAsset { id: asset.id, fun: Fungible(source_fee) }.into(), + maybe_context, + source_para_account.clone(), + ); + + let dest_fee = total_fee - source_fee; + deposit_or_burn_fee::( + MultiAsset { id: asset.id, fun: Fungible(dest_fee) }.into(), + maybe_context, + dest_para_account.clone(), + ); + }, + NonFungible(_) => { + deposit_or_burn_fee::( + asset.into(), + maybe_context, + source_para_account.clone(), + ); + }, + } + } + + return MultiAssets::new() + } + + fee + } +} diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 6ff60b958fed..ebb3de740b9e 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -41,7 +41,7 @@ use xcm_builder::{ NativeAsset, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WithComputedOrigin, WithUniqueTopic, XcmFeesToAccount, + WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -51,7 +51,7 @@ parameter_types! 
{ pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = Parachain(ParachainInfo::parachain_id().into()).into(); pub const ExecutiveBody: BodyId = BodyId::Executive; - pub TreasuryAccount: Option = Some(TREASURY_PALLET_ID.into_account_truncating()); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } @@ -199,7 +199,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = ConstU32<8>; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = XcmFeesToAccount; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index fb1653c549e1..0814b77414f2 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -43,7 +43,8 @@ use xcm_builder::{ DescribeFamily, FixedWeightBounds, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeesToAccount, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -53,7 +54,7 @@ parameter_types! { pub UniversalLocation: InteriorMultiLocation = ThisNetwork::get().into(); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); - pub TreasuryAccount: Option = Some(Treasury::account_id()); + pub TreasuryAccount: AccountId = Treasury::account_id(); } pub type LocationConverter = ( @@ -191,7 +192,10 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = XcmPallet; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = XcmFeesToAccount; + type FeeManager = XcmFeeManagerFromComponents< + SystemParachains, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index dd6a29885ad6..64e07317fc74 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -44,7 +44,7 @@ use xcm_builder::{ DescribeFamily, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeesToAccount, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -54,7 +54,7 @@ parameter_types! 
{ pub const UniversalLocation: InteriorMultiLocation = X1(GlobalConsensus(ThisNetwork::get())); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); - pub TreasuryAccount: Option = Some(Treasury::account_id()); + pub TreasuryAccount: AccountId = Treasury::account_id(); /// The asset ID for the asset that we use to pay for message delivery fees. pub FeeAssetId: AssetId = Concrete(TokenLocation::get()); /// The base fee for the message delivery fees. @@ -185,7 +185,10 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = XcmPallet; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = XcmFeesToAccount; + type FeeManager = XcmFeeManagerFromComponents< + SystemParachains, + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index c6b76e0ffade..4a997666027f 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -561,7 +561,7 @@ benchmarks! { let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( &origin, &destination.into(), - FeeReason::Export(network), + FeeReason::Export { network, destination }, ); let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index afa956c3cdae..3b41ad90ec99 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -34,7 +34,7 @@ use xcm_builder::{ AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, XcmFeesToAccount, + SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -343,11 +343,9 @@ impl xcm_executor::Config for XcmConfig { type SubscriptionService = XcmPallet; type PalletInstancesInfo = AllPalletsWithSystem; type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type FeeManager = XcmFeesToAccount< - Self, + type FeeManager = XcmFeeManagerFromComponents< EverythingBut, - AccountId, - XcmFeesTargetAccount, + XcmFeeToAccount, >; type MessageExporter = (); type UniversalAliases = Nothing; diff --git a/polkadot/xcm/xcm-builder/src/fee_handling.rs b/polkadot/xcm/xcm-builder/src/fee_handling.rs index 1386747c9778..c158d5d862d7 100644 --- a/polkadot/xcm/xcm-builder/src/fee_handling.rs +++ b/polkadot/xcm/xcm-builder/src/fee_handling.rs @@ -19,40 +19,103 @@ use frame_support::traits::{Contains, Get}; use xcm::prelude::*; use xcm_executor::traits::{FeeManager, FeeReason, TransactAsset}; -/// A `FeeManager` implementation that simply deposits the fees handled into a specific on-chain -/// `ReceiverAccount`. 
-///
-/// It reuses the `AssetTransactor` configured on the XCM executor to deposit fee assets, and also
-/// permits specifying `WaivedLocations` for locations that are privileged to not pay for fees. If
-/// the `AssetTransactor` returns an error while calling `deposit_asset`, then a warning will be
-/// logged.
-pub struct XcmFeesToAccount<XcmConfig, WaivedLocations, AccountId, ReceiverAccount>(
-	PhantomData<(XcmConfig, WaivedLocations, AccountId, ReceiverAccount)>,
+/// Handles the fees that are taken by certain XCM instructions.
+pub trait HandleFee {
+	/// Do something with the fee which has been paid. Doing nothing here silently burns the
+	/// fees.
+	///
+	/// Returns any part of the fee that wasn't consumed.
+	fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason)
+		-> MultiAssets;
+}
+
+// Default `HandleFee` implementation that just burns the fee.
+impl HandleFee for () {
+	fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) -> MultiAssets {
+		MultiAssets::new()
+	}
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(1, 30)]
+impl HandleFee for Tuple {
+	fn handle_fee(
+		fee: MultiAssets,
+		context: Option<&XcmContext>,
+		reason: FeeReason,
+	) -> MultiAssets {
+		let mut unconsumed_fee = fee;
+		for_tuples!( #(
+			unconsumed_fee = Tuple::handle_fee(unconsumed_fee, context, reason);
+			if unconsumed_fee.is_none() {
+				return unconsumed_fee;
+			}
+		)* );
+
+		unconsumed_fee
+	}
+}
+
+/// A `FeeManager` implementation that permits the specified `WaivedLocations` to not pay for fees
+/// and that uses the provided `HandleFee` implementation otherwise.
+pub struct XcmFeeManagerFromComponents<WaivedLocations, HandleFee>(
+	PhantomData<(WaivedLocations, HandleFee)>,
 );
-impl<
-		XcmConfig: xcm_executor::Config,
-		WaivedLocations: Contains<MultiLocation>,
-		AccountId: Clone + Into<[u8; 32]>,
-		ReceiverAccount: Get<Option<AccountId>>,
-	> FeeManager for XcmFeesToAccount<XcmConfig, WaivedLocations, AccountId, ReceiverAccount>
+impl<WaivedLocations: Contains<MultiLocation>, FeeHandler: HandleFee> FeeManager
+	for XcmFeeManagerFromComponents<WaivedLocations, FeeHandler>
 {
 	fn is_waived(origin: Option<&MultiLocation>, _: FeeReason) -> bool {
 		let Some(loc) = origin else { return false };
 		WaivedLocations::contains(loc)
 	}
 
-	fn handle_fee(fees: MultiAssets, context: Option<&XcmContext>) {
-		if let Some(receiver) = ReceiverAccount::get() {
-			let dest = AccountId32 { network: None, id: receiver.into() }.into();
-			for asset in fees.into_inner() {
-				if let Err(e) = XcmConfig::AssetTransactor::deposit_asset(&asset, &dest, context) {
-					log::trace!(
-						target: "xcm::fees",
-						"`AssetTransactor::deposit_asset` returned error: {:?}, burning fees: {:?}",
-						e, asset,
-					);
-				}
-			}
+	fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, reason: FeeReason) {
+		FeeHandler::handle_fee(fee, context, reason);
+	}
+}
+
+/// Try to deposit the given fee in the specified account.
+/// Burns the fee in case of a failure.
+pub fn deposit_or_burn_fee<AssetTransactor: TransactAsset, AccountId: Clone + Into<[u8; 32]>>(
+	fee: MultiAssets,
+	context: Option<&XcmContext>,
+	receiver: AccountId,
+) {
+	let dest = AccountId32 { network: None, id: receiver.into() }.into();
+	for asset in fee.into_inner() {
+		if let Err(e) = AssetTransactor::deposit_asset(&asset, &dest, context) {
+			log::trace!(
+				target: "xcm::fees",
+				"`AssetTransactor::deposit_asset` returned error: {:?}. Burning fee: {:?}. \
+				They might be burned.",
+				e, asset,
+			);
 		}
 	}
 }
+
+/// A `HandleFee` implementation that simply deposits the fees into a specific on-chain
+/// `ReceiverAccount`.
+///
+/// It reuses the `AssetTransactor` configured on the XCM executor to deposit fee assets. If
+/// the `AssetTransactor` returns an error while calling `deposit_asset`, then a warning will be
+/// logged and the fee burned.
+pub struct XcmFeeToAccount( + PhantomData<(AssetTransactor, AccountId, ReceiverAccount)>, +); + +impl< + AssetTransactor: TransactAsset, + AccountId: Clone + Into<[u8; 32]>, + ReceiverAccount: Get, + > HandleFee for XcmFeeToAccount +{ + fn handle_fee( + fee: MultiAssets, + context: Option<&XcmContext>, + _reason: FeeReason, + ) -> MultiAssets { + deposit_or_burn_fee::(fee, context, ReceiverAccount::get()); + + MultiAssets::new() + } +} diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 34371398cdc3..0a74b3f579ae 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -68,7 +68,9 @@ mod currency_adapter; pub use currency_adapter::CurrencyAdapter; mod fee_handling; -pub use fee_handling::XcmFeesToAccount; +pub use fee_handling::{ + deposit_or_burn_fee, HandleFee, XcmFeeManagerFromComponents, XcmFeeToAccount, +}; mod fungibles_adapter; pub use fungibles_adapter::{ diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 7a7c8837fc1c..543b00e0118c 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -526,7 +526,8 @@ impl FeeManager for TestFeeManager { fn is_waived(_: Option<&MultiLocation>, r: FeeReason) -> bool { IS_WAIVED.with(|l| l.borrow().contains(&r)) } - fn handle_fee(_: MultiAssets, _: Option<&XcmContext>) {} + + fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) {} } #[derive(Clone, Eq, PartialEq, Debug)] diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e11ec2630e43..e43d7a048992 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -248,7 +248,7 @@ impl ExecuteXcm for XcmExecutor XcmExecutor { destination, xcm, )?; - self.take_fee(fee, FeeReason::Export(network))?; + self.take_fee(fee, FeeReason::Export { network, destination })?; Config::MessageExporter::deliver(ticket)?; Ok(()) }, @@ -962,7 +962,7 @@ impl XcmExecutor { } else { self.holding.try_take(fee.into()).map_err(|_| XcmError::NotHoldingFees)?.into() }; - Config::FeeManager::handle_fee(paid, Some(&self.context)); + Config::FeeManager::handle_fee(paid, Some(&self.context), reason); Ok(()) } diff --git a/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs b/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs index 2b2f21927f2e..d7146457f3b9 100644 --- a/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs +++ b/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs @@ -18,12 +18,12 @@ use xcm::prelude::*; /// Handle stuff to do with taking fees in certain XCM instructions. pub trait FeeManager { - /// Determine if a fee which would normally payable should be waived. + /// Determine if a fee should be waived. fn is_waived(origin: Option<&MultiLocation>, r: FeeReason) -> bool; /// Do something with the fee which has been paid. Doing nothing here silently burns the /// fees. - fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>); + fn handle_fee(fee: MultiAssets, context: Option<&XcmContext>, r: FeeReason); } /// Context under which a fee is paid. @@ -42,7 +42,7 @@ pub enum FeeReason { /// When the `QueryPallet` instruction is called. QueryPallet, /// When the `ExportMessage` instruction is called (and includes the network ID). - Export(NetworkId), + Export { network: NetworkId, destination: InteriorMultiLocation }, /// The `charge_fees` API. ChargeFees, /// When the `LockAsset` instruction is called. 
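Taken together, the pieces above make fee handling composable: `XcmFeeManagerFromComponents` pairs a waiver filter with a chain of `HandleFee` implementations that are tried in order until the fee is fully consumed. Below is a minimal sketch of a custom handler placed in front of the treasury deposit; `ParentOnly`, `BurnExportFees` and the commented-out wiring are illustrative only, not part of this patch:

```rust
use frame_support::traits::Contains;
use xcm::latest::prelude::*;
use xcm_builder::HandleFee;
use xcm_executor::traits::FeeReason;

/// Illustrative filter: waive fees for the relay chain only.
pub struct ParentOnly;
impl Contains<MultiLocation> for ParentOnly {
    fn contains(loc: &MultiLocation) -> bool {
        matches!(loc, MultiLocation { parents: 1, interior: Here })
    }
}

/// Illustrative handler: consume (burn) `ExportMessage` fees, pass
/// everything else on to the next handler in the tuple.
pub struct BurnExportFees;
impl HandleFee for BurnExportFees {
    fn handle_fee(
        fee: MultiAssets,
        _context: Option<&XcmContext>,
        reason: FeeReason,
    ) -> MultiAssets {
        match reason {
            // Returning an empty `MultiAssets` marks the fee as fully
            // consumed, which short-circuits the tuple implementation.
            FeeReason::Export { .. } => MultiAssets::new(),
            // Anything unconsumed flows to the next handler.
            _ => fee,
        }
    }
}

// Wired into an executor config it would read something like:
//
// type FeeManager = xcm_builder::XcmFeeManagerFromComponents<
//     ParentOnly,
//     (BurnExportFees, xcm_builder::XcmFeeToAccount<AssetTransactor, AccountId, TreasuryAccount>),
// >;
```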
@@ -55,5 +55,6 @@ impl FeeManager for () {
 	fn is_waived(_: Option<&MultiLocation>, _: FeeReason) -> bool {
 		false
 	}
-	fn handle_fee(_: MultiAssets, _: Option<&XcmContext>) {}
+
+	fn handle_fee(_: MultiAssets, _: Option<&XcmContext>, _: FeeReason) {}
 }

From b2bb8cbcf3cea421de48bebd3e5a3400964fdf7e Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Wed, 1 Nov 2023 16:16:13 +0100
Subject: [PATCH 61/69] review-bot: prevent request review of core-devs (#2121)

This will remove `core-devs` from being required reviewers of PRs.
---
 .github/review-bot.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.github/review-bot.yml b/.github/review-bot.yml
index 581e33762608..b053ead37fb7 100644
--- a/.github/review-bot.yml
+++ b/.github/review-bot.yml
@@ -118,3 +118,7 @@ rules:
   - minApprovals: 1
     teams:
       - ci
+
+preventReviewRequests:
+  teams:
+    - core-devs

From b6965af493eaa277bf7983201311362cf62ce5a9 Mon Sep 17 00:00:00 2001
From: Kevin Krone
Date: Wed, 1 Nov 2023 16:28:02 +0100
Subject: [PATCH 62/69] Improve FRAME storage docs (#1714)

This is a port (and hopefully a small improvement) of @kianenigma's PR from the
old Substrate repo: https://github.com/paritytech/substrate/pull/13987. Following
#1689 I moved the documentation of all macros relevant to this PR from
`frame_support_procedural` to `pallet_macros` while including a hint for RA users.

Question: Again with respect to #1689: Is there a good reason why we should *not*
enhance paths with links to our current rustdocs? For example, instead of

```rust
/// **Rust-Analyzer users**: See the documentation of the Rust item in
/// `frame_support::pallet_macros::storage`.
```

we could write

```rust
/// **Rust-Analyzer users**: See the documentation of the Rust item in
/// [`frame_support::pallet_macros::storage`](https://paritytech.github.io/polkadot-sdk/master/frame_support/pallet_macros/attr.storage.html).
```

This results in a clickable link (screenshot omitted).

I don't really expect the links to become outdated any time soon, but I think this
would be a great UX improvement over just having paths.

TODOs:
- [ ] Add documentation for `constant_name` macro
- [x] Add proper documentation for different `QueryKinds`, i.e. `OptionQuery`,
  `ValueQuery`, `ResultQuery`. One example for each. Custom `OnEmpty` should be
  moved to `QueryKinds` trait doc page.
- [ ] Rework `type_value` docs --------- Co-authored-by: kianenigma --- substrate/frame/bags-list/Cargo.toml | 30 +- substrate/frame/support/procedural/src/lib.rs | 94 ++--- substrate/frame/support/src/lib.rs | 330 +++++++++++++++++- .../support/src/storage/types/counted_map.rs | 60 +++- .../support/src/storage/types/counted_nmap.rs | 47 ++- .../support/src/storage/types/double_map.rs | 95 ++++- .../frame/support/src/storage/types/map.rs | 46 ++- .../frame/support/src/storage/types/mod.rs | 82 ++++- .../frame/support/src/storage/types/nmap.rs | 60 +++- .../frame/support/src/storage/types/value.rs | 33 +- 10 files changed, 741 insertions(+), 136 deletions(-) diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 05b86f6c7239..b99726ebf2dd 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -13,17 +13,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } # primitives -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } # FRAME -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } # third party log = { version = "0.4.17", default-features = false } @@ -31,11 +35,11 @@ docify = "0.2.6" aquamarine = { version = "0.3.2" } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false , optional = true} -pallet-balances = { path = "../balances", default-features = false , optional = true} -sp-core = { path = "../../primitives/core", default-features = false , optional = true} -sp-io = { path = "../../primitives/io", default-features = false , optional = true} -sp-tracing = { path = "../../primitives/tracing", default-features = false , optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +pallet-balances = { path = "../balances", default-features = false, optional = true } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } +sp-io = { path = "../../primitives/io", default-features = false, optional = true } +sp-tracing = { path = "../../primitives/tracing", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../primitives/core" } @@ -46,7 +50,7 @@ frame-election-provider-support = { path = "../election-provider-support" } frame-benchmarking = { path = "../benchmarking" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git 
a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 9c551b9f2306..68bf3e4874be 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -979,21 +979,26 @@ pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by `Get` -/// from [`pallet::config`](`macro@config`) into metadata, e.g.: /// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; -/// } -/// ``` +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::constant`. #[proc_macro_attribute] pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::constant_name`. +#[proc_macro_attribute] +pub fn constant_name(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// To bypass the `frame_system::Config` supertrait check, use the attribute /// `pallet::disable_frame_system_supertrait_check`, e.g.: /// @@ -1099,6 +1104,16 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::call`. +#[proc_macro_attribute] +pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, /// which explicitly defines the codec index for the dispatchable function in the `Call` enum. /// @@ -1268,60 +1283,11 @@ pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. /// -/// Item should be defined as: +/// --- /// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; -/// ``` -/// -/// or with unnamed generic: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<_, $some_generics, ...>; -/// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be -/// one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. The generic arguments of the -/// storage type can be given in two manners: named and unnamed. For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * `StorageValue` expects `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `StorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `CountedStorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, -/// * `StorageDoubleMap` expects `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` and optionally -/// `QueryKind` and `OnEmpty`. 
-/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. -/// -/// For the `CountedStorageMap` variant, the `Prefix` also implements -/// `CountedStorageMapInstance`. It also associates a `CounterPrefix`, which is implemented the -/// same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = CountedStorageaMap<...>` will store -/// its counter at the prefix: `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::storage`. #[proc_macro_attribute] pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1424,6 +1390,9 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// /// **Rust-Analyzer users**: See the documentation of the Rust item in /// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] @@ -1431,6 +1400,9 @@ pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// /// **Rust-Analyzer users**: See the documentation of the Rust item in /// `frame_support::pallet_macros::genesis_build`. #[proc_macro_attribute] diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 1c696bbb84ac..4888b8996d1a 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2199,11 +2199,10 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - call_index, compact, composite_enum, config, constant, - disable_frame_system_supertrait_check, error, event, extra_constants, generate_deposit, - generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, - origin, pallet_section, storage, storage_prefix, storage_version, type_value, unbounded, - validate_unsigned, weight, whitelist_storage, + call_index, compact, composite_enum, config, disable_frame_system_supertrait_check, error, + event, extra_constants, generate_deposit, generate_store, getter, hooks, import_section, + inherent, no_default, no_default_bounds, origin, pallet_section, storage_prefix, + storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, }; /// Allows you to define the genesis configuration for the pallet. @@ -2220,7 +2219,7 @@ pub mod pallet_macros { /// /// The fields of the `GenesisConfig` can in turn be populated by the chain-spec. 
/// - /// ## Example: + /// ## Example /// /// ``` /// #[frame_support::pallet] /// @@ -2275,6 +2274,230 @@ /// } /// ``` pub use frame_support_procedural::genesis_build; + + /// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded + /// by [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) + /// into metadata. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// use frame_support::pallet_prelude::*; + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// #[pallet::config] + /// pub trait Config: frame_system::Config { + /// /// This is like a normal `Get` trait, but it will be added into metadata. + /// #[pallet::constant] + /// type Foo: Get<u32>; + /// } + /// } + /// ``` + pub use frame_support_procedural::constant; + + /// Declares a type alias as a storage item. Storage items are pointers to data stored + /// on-chain (the *blockchain state*), under a specific key. The exact key is dependent on + /// the type of the storage. + /// + /// > From the perspective of this pallet, the entire blockchain state is abstracted behind + /// > a key-value api, namely [`sp_io::storage`]. + /// + /// ## Storage Types + /// + /// The following storage types are supported by the `#[storage]` macro. For specific + /// information about each storage type, refer to the documentation of the respective type. + /// + /// * [`StorageValue`](crate::storage::types::StorageValue) + /// * [`StorageMap`](crate::storage::types::StorageMap) + /// * [`CountedStorageMap`](crate::storage::types::CountedStorageMap) + /// * [`StorageDoubleMap`](crate::storage::types::StorageDoubleMap) + /// * [`StorageNMap`](crate::storage::types::StorageNMap) + /// * [`CountedStorageNMap`](crate::storage::types::CountedStorageNMap) + /// + /// ## Storage Type Usage + /// + /// The following details are relevant to all of the aforementioned storage types. + /// Depending on the exact storage type, it may require the following generic parameters: + /// + /// * [`Prefix`](#prefixes) - Used to give the storage item a unique key in the underlying + /// storage. + /// * `Key` - Type of the keys used to store the values, + /// * `Value` - Type of the value being stored, + /// * [`Hasher`](#hashers) - Used to ensure the keys of a map are uniformly distributed, + /// * [`QueryKind`](#querykind) - Used to configure how to handle queries to the underlying + /// storage, + /// * `OnEmpty` - Used to handle missing values when querying the underlying storage, + /// * `MaxValues` - _not currently used_. + /// + /// Each `Key` type requires its own designated `Hasher` declaration, so that + /// [`StorageDoubleMap`](frame_support::storage::types::StorageDoubleMap) needs two of + /// each, and [`StorageNMap`](frame_support::storage::types::StorageNMap) needs `N` such + /// pairs. Since [`StorageValue`](frame_support::storage::types::StorageValue) only stores + /// a single element, no configuration of hashers is needed. + /// + /// ### Syntax + /// + /// Two general syntaxes are supported, as demonstrated below: + /// + /// 1. Named type parameters, e.g., `type Foo<T> = StorageValue<Value = u32>`. + /// 2. Positional type parameters, e.g., `type Foo<T> = StorageValue<_, u32>`. + /// + /// In both instances, declaring the generic parameter `<T>` is mandatory. Optionally, it + /// can also be explicitly declared as `<T: Config>`. In the compiled code, `T` will + /// automatically include the trait bound `Config`.
+ /// + /// Note that in positional syntax, the first generic type parameter must be `_`. + /// + /// #### Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// /// Positional syntax, without bounding `T`. + /// #[pallet::storage] + /// pub type Foo<T> = StorageValue<_, u32>; + /// + /// /// Positional syntax, with bounding `T`. + /// #[pallet::storage] + /// pub type Bar<T: Config> = StorageValue<_, u32>; + /// + /// /// Named syntax. + /// #[pallet::storage] + /// pub type Baz<T> = StorageMap<Hasher = Blake2_128Concat, Key = u32, Value = u32>; + /// } + /// ``` + /// + /// ### QueryKind + /// + /// Every storage type mentioned above has a generic type called + /// [`QueryKind`](frame_support::storage::types::QueryKindTrait) that determines its + /// "query" type. This refers to the kind of value returned when querying the storage, for + /// instance, through a `::get()` method. + /// + /// There are three types of queries: + /// + /// 1. [`OptionQuery`](frame_support::storage::types::OptionQuery): The default query type. + /// It returns `Some(V)` if the value is present, or `None` if it isn't, where `V` is + /// the value type. + /// 2. [`ValueQuery`](frame_support::storage::types::ValueQuery): Returns the value itself + /// if present; otherwise, it returns `Default::default()`. This behavior can be + /// adjusted with the `OnEmpty` generic parameter, which defaults to `OnEmpty = + /// GetDefault`. + /// 3. [`ResultQuery`](frame_support::storage::types::ResultQuery): Returns `Result<V, E>`, + /// where `V` is the value type. + /// + /// See [`QueryKind`](frame_support::storage::types::QueryKindTrait) for further examples. + /// + /// ### Optimized Appending + /// + /// All storage items — such as + /// [`StorageValue`](frame_support::storage::types::StorageValue), + /// [`StorageMap`](frame_support::storage::types::StorageMap), and their variants — offer an + /// `::append()` method optimized for collections. Using this method avoids the + /// inefficiency of decoding and re-encoding entire collections when adding items. For + /// instance, consider the storage declaration `type MyVal = StorageValue<_, Vec<u8>, + /// ValueQuery>`. With `MyVal` storing a large list of bytes, `::append()` lets you + /// directly add bytes to the end in storage without processing the full list. Depending on + /// the storage type, additional key specifications may be needed. + /// + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_append)] + /// Similarly, there also exists a `::try_append()` method, which can be used when handling + /// types where an append operation might fail, such as a + /// [`BoundedVec`](frame_support::BoundedVec). + /// + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_try_append)] + /// ### Optimized Length Decoding + /// + /// All storage items — such as + /// [`StorageValue`](frame_support::storage::types::StorageValue), + /// [`StorageMap`](frame_support::storage::types::StorageMap), and their counterparts — + /// incorporate the `::decode_len()` method. This method allows for efficient retrieval of + /// a collection's length without the necessity of decoding the entire dataset.
+ /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_decode_len)] + /// ### Hashers + /// + /// For all storage types, except + /// [`StorageValue`](frame_support::storage::types::StorageValue), a set of hashers needs + /// to be specified. The choice of hashers is crucial, especially in production chains. The + /// purpose of storage hashers in maps is to ensure the keys of a map are + /// uniformly distributed. An unbalanced map/trie can lead to inefficient performance. + /// + /// In general, hashers are categorized as either cryptographically secure or not. The + /// former is slower than the latter. `Blake2` and `Twox` serve as examples of each, + /// respectively. + /// + /// As a rule of thumb: + /// + /// 1. If the map keys are not controlled by end users, or are cryptographically secure by + /// definition (e.g., `AccountId`), then the use of cryptographically secure hashers is NOT + /// required. + /// 2. If the map keys are controllable by the end users, cryptographically secure hashers + /// should be used. + /// + /// For more information, look at the types that implement + /// [`frame_support::StorageHasher`](frame_support::StorageHasher). + /// + /// Lastly, it's recommended for hashers with "concat" to have reversible hashes. Refer to + /// the implementors section of + /// [`hash::ReversibleStorageHasher`](frame_support::hash::ReversibleStorageHasher). + /// + /// ### Prefixes + /// + /// Internally, every storage type generates a "prefix". This prefix serves as the initial + /// segment of the key utilized to store values in the on-chain state (i.e., the final key + /// used in [`sp_io::storage`](sp_io::storage)). For all storage types, the following rule + /// applies: + /// + /// > The storage prefix begins with `twox128(pallet_prefix) ++ twox128(STORAGE_PREFIX)`, + /// > where + /// > `pallet_prefix` is the name assigned to the pallet instance in + /// > [`frame_support::construct_runtime`](frame_support::construct_runtime), and + /// > `STORAGE_PREFIX` is the name of the `type` aliased to a particular storage type, such + /// > as + /// > `Foo` in `type Foo = StorageValue<..>`. + /// + /// For [`StorageValue`](frame_support::storage::types::StorageValue), no additional key is + /// required. For map types, the prefix is extended with one or more keys defined by the + /// map. + /// + /// #### Example + #[doc = docify::embed!("src/lib.rs", example_storage_value_map_prefixes)] + /// ## Related Macros + /// + /// The following attribute macros can be used in conjunction with the `#[storage]` macro: + /// + /// * [`macro@getter`]: Creates a custom getter function. + /// * [`macro@storage_prefix`]: Overrides the default prefix of the storage item. + /// * [`macro@unbounded`]: Declares the storage item as unbounded. + /// + /// #### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// /// A kitchen-sink StorageValue, with all possible additional attributes. 
+ /// #[pallet::storage] + /// #[pallet::getter(fn foo)] + /// #[pallet::storage_prefix = "OtherFoo"] + /// #[pallet::unbounded] + /// pub type Foo<T> = StorageValue<_, u32, ValueQuery>; + /// } + /// ``` + pub use frame_support_procedural::storage; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] @@ -2291,3 +2514,98 @@ sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); // Helper for implementing GenesisBuilder runtime API pub mod genesis_builder_helper; + +#[cfg(test)] +mod test { + // use super::*; + use crate::{ + hash::*, + storage::types::{StorageMap, StorageValue, ValueQuery}, + traits::{ConstU32, StorageInstance}, + BoundedVec, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct Prefix1; + impl StorageInstance for Prefix1 { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "MyVal"; + } + struct Prefix2; + impl StorageInstance for Prefix2 { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "MyMap"; + } + + #[docify::export] + #[test] + pub fn example_storage_value_try_append() { + type MyVal = StorageValue<Prefix, BoundedVec<u32, ConstU32<10>>, ValueQuery>; + + TestExternalities::default().execute_with(|| { + MyVal::set(BoundedVec::try_from(vec![42, 43]).unwrap()); + assert_eq!(MyVal::get(), vec![42, 43]); + // Try to append a single u32 to BoundedVec stored in `MyVal` + assert_ok!(MyVal::try_append(40)); + assert_eq!(MyVal::get(), vec![42, 43, 40]); + }); + } + + #[docify::export] + #[test] + pub fn example_storage_value_append() { + type MyVal = StorageValue<Prefix, Vec<u32>, ValueQuery>; + + TestExternalities::default().execute_with(|| { + MyVal::set(vec![42, 43]); + assert_eq!(MyVal::get(), vec![42, 43]); + // Append a single u32 to Vec<u32> stored in `MyVal` + MyVal::append(40); + assert_eq!(MyVal::get(), vec![42, 43, 40]); + }); + } + + #[docify::export] + #[test] + pub fn example_storage_value_decode_len() { + type MyVal = StorageValue<Prefix, BoundedVec<u32, ConstU32<10>>, ValueQuery>; + + TestExternalities::default().execute_with(|| { + MyVal::set(BoundedVec::try_from(vec![42, 43]).unwrap()); + assert_eq!(MyVal::decode_len().unwrap(), 2); + }); + } + + #[docify::export] + #[test] + pub fn example_storage_value_map_prefixes() { + type MyVal = StorageValue<Prefix1, u32, ValueQuery>; + type MyMap = StorageMap<Prefix2, Blake2_128Concat, u16, u32, ValueQuery>; + TestExternalities::default().execute_with(|| { + // This example assumes `pallet_prefix` to be "test" + // Get storage key for `MyVal` StorageValue + assert_eq!( + MyVal::hashed_key().to_vec(), + [twox_128(b"test"), twox_128(b"MyVal")].concat() + ); + // Get storage key for `MyMap` StorageMap and `key` = 1 + let mut k: Vec<u8> = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"MyMap")); + k.extend(&1u16.blake2_128_concat()); + assert_eq!(MyMap::hashed_key_for(1).to_vec(), k); + }); + } +} diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs index 50e2c678248c..75fbdf2617d1 100644 --- a/substrate/frame/support/src/storage/types/counted_map.rs +++ b/substrate/frame/support/src/storage/types/counted_map.rs @@ -35,8 +35,8 @@ use sp_metadata_ir::StorageEntryMetadataIR; use sp_runtime::traits::Saturating; use sp_std::prelude::*; -/// A wrapper around a `StorageMap` and a `StorageValue` to keep track of how many items -/// are in a map, without needing to iterate all the values.
+/// A wrapper around a [`StorageMap`] and a [`StorageValue`] (with the value being `u32`) to keep +/// track of how many items are in a map, without needing to iterate all the values. /// /// This storage item has additional storage read and write overhead when manipulating values /// compared to a regular storage map. /// @@ -47,6 +47,51 @@ use sp_std::prelude::*; /// /// Whenever the counter needs to be updated, an additional read and write occurs to update that /// counter. +/// +/// The total number of items currently stored in the map can be retrieved with the +/// [`CountedStorageMap::count`] method. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Examples +/// +/// Declaring a counted map: +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink CountedStorageMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo<T> = CountedStorageMap< +/// _, +/// Blake2_128Concat, +/// u32, +/// u32, +/// ValueQuery, +/// >; +/// +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar<T> = CountedStorageMap< +/// Hasher = Blake2_128Concat, +/// Key = u32, +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } +/// ``` +/// +/// Using a counted map in action: +#[doc = docify::embed!("src/storage/types/counted_map.rs", test_simple_count_works)] pub struct CountedStorageMap< Prefix, Hasher, @@ -1173,4 +1218,15 @@ ] ); } + + #[docify::export] + #[test] + fn test_simple_count_works() { + type FooCountedMap = CountedStorageMap<Prefix, Twox64Concat, u16, u32>; + TestExternalities::default().execute_with(|| { + FooCountedMap::insert(1, 1); + FooCountedMap::insert(2, 2); + assert_eq!(FooCountedMap::count(), 2); + }); + } } diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs index 5da31c059225..c2c2197aceee 100644 --- a/substrate/frame/support/src/storage/types/counted_nmap.rs +++ b/substrate/frame/support/src/storage/types/counted_nmap.rs @@ -33,8 +33,8 @@ use sp_metadata_ir::StorageEntryMetadataIR; use sp_runtime::traits::Saturating; use sp_std::prelude::*; -/// A wrapper around a `StorageNMap` and a `StorageValue` to keep track of how many items -/// are in a map, without needing to iterate over all of the values. +/// A wrapper around a [`StorageNMap`] and a [`StorageValue`] (with the value being `u32`) to keep +/// track of how many items are in a map, without needing to iterate all the values. /// /// This storage item has some additional storage read and write overhead when manipulating values /// compared to a regular storage map. /// @@ -45,6 +45,49 @@ use sp_std::prelude::*; /// /// Whenever the counter needs to be updated, an additional read and write occurs to update that /// counter. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`].
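Unlike `counted_map.rs` above, this file gains no docify usage test, so a usage sketch may help alongside the declaration example that follows. The `FooCountedNMap` alias and its hasher/key choices are assumptions for illustration, mirroring the test helpers elsewhere in this patch:

```rust
use frame_support::{
    pallet_prelude::*, storage::types::CountedStorageNMapInstance, traits::StorageInstance,
};
use sp_io::TestExternalities;

// Prefix boilerplate, as in the test modules added by this patch.
struct Prefix;
impl StorageInstance for Prefix {
    fn pallet_prefix() -> &'static str { "test" }
    const STORAGE_PREFIX: &'static str = "foo";
}
struct CounterPrefix;
impl StorageInstance for CounterPrefix {
    fn pallet_prefix() -> &'static str { "test" }
    const STORAGE_PREFIX: &'static str = "counter_for_foo";
}
// Counted storage needs a second prefix for the counter itself.
impl CountedStorageNMapInstance for Prefix {
    type CounterPrefix = CounterPrefix;
}

type FooCountedNMap =
    CountedStorageNMap<Prefix, (NMapKey<Blake2_128Concat, u8>, NMapKey<Twox64Concat, u32>), u64>;

fn main() {
    TestExternalities::default().execute_with(|| {
        FooCountedNMap::insert((1, 10), 100);
        FooCountedNMap::insert((2, 20), 200);
        // The counter is maintained on every mutation, so no iteration is needed.
        assert_eq!(FooCountedNMap::count(), 2);
    });
}
```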
+/// +/// # Example +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink CountedStorageNMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo<T> = CountedStorageNMap< +/// _, +/// ( +/// NMapKey<Blake2_128Concat, u8>, +/// NMapKey<Identity, u16>, +/// NMapKey<Twox64Concat, u32> +/// ), +/// u64, +/// ValueQuery, +/// >; +/// +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar<T> = CountedStorageNMap< +/// Key = ( +/// NMapKey<Blake2_128Concat, u8>, +/// NMapKey<Identity, u16>, +/// NMapKey<Twox64Concat, u32> +/// ), +/// Value = u64, +/// QueryKind = ValueQuery, +/// >; +/// } +/// ``` pub struct CountedStorageNMap< Prefix, Key, diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index 519ffcbafade..1002222a895c 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -31,22 +31,66 @@ use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_std::prelude::*; -/// A type that allow to store values for `(key1, key2)` couple. Similar to `StorageMap` but allow -/// to iterate and remove value associated to first key. +/// A type representing a *double map* in storage. This structure associates a pair of keys with a +/// value of a specified type stored on-chain. /// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key1)) -/// ++ Hasher2(encode(key2)) +/// A double map with keys `k1` and `k2` can be likened to a +/// [`StorageMap`](frame_support::storage::types::StorageMap) with a key of type `(k1, k2)`. +/// However, a double map offers functions specific to each key, enabling partial iteration and +/// deletion based on one key alone. +/// +/// Also, conceptually, a double map is a special case of a +/// [`StorageNMap`](frame_support::storage::types::StorageNMap) using two keys. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Examples +/// +/// ### Kitchen-sink +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageDoubleMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo<T> = StorageDoubleMap< +/// _, +/// Blake2_128Concat, +/// u8, +/// Twox64Concat, +/// u16, +/// u32, +/// ValueQuery +/// >; +/// +/// /// Alternative named syntax. +/// #[pallet::storage] +/// pub type Bar<T> = StorageDoubleMap< +/// Hasher1 = Blake2_128Concat, +/// Key1 = u8, +/// Hasher2 = Twox64Concat, +/// Key2 = u16, +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } /// ``` /// -/// # Warning +/// ### Partial Iteration & Removal /// -/// If the key1s (or key2s) are not trusted (e.g. can be set by a user), a cryptographic `hasher` -/// such as `blake2_128_concat` must be used for Hasher1 (resp. Hasher2).
Otherwise, other values -/// in storage can be compromised. +/// When `Hasher1` and `Hasher2` implement the +/// [`ReversibleStorageHasher`](frame_support::ReversibleStorageHasher) trait, the first key `k1` +/// can be used to partially iterate over keys and values of the double map, and to delete items. +#[doc = docify::embed!("src/storage/types/double_map.rs", example_double_map_partial_operations)] pub struct StorageDoubleMap< Prefix, Hasher1, @@ -742,6 +786,7 @@ mod test { use crate::{hash::*, storage::types::ValueQuery}; use sp_io::{hashing::twox_128, TestExternalities}; use sp_metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}; + use std::collections::BTreeSet; struct Prefix; impl StorageInstance for Prefix { @@ -972,4 +1017,30 @@ assert_eq!(A::drain_prefix(4).collect::<Vec<_>>(), vec![]); }) } + + #[docify::export] + #[test] + fn example_double_map_partial_operations() { + type FooDoubleMap = + StorageDoubleMap<Prefix, Blake2_128Concat, u16, Blake2_128Concat, u16, u32, ValueQuery>; + + TestExternalities::default().execute_with(|| { + FooDoubleMap::insert(0, 0, 42); + FooDoubleMap::insert(0, 1, 43); + FooDoubleMap::insert(1, 0, 314); + + // should be equal to {0,1} (ordering is random) + let collected_k2_keys: BTreeSet<_> = FooDoubleMap::iter_key_prefix(0).collect(); + assert_eq!(collected_k2_keys, [0, 1].iter().copied().collect::<BTreeSet<_>>()); + + // should be equal to {42,43} (ordering is random) + let collected_k2_values: BTreeSet<_> = FooDoubleMap::iter_prefix_values(0).collect(); + assert_eq!(collected_k2_values, [42, 43].iter().copied().collect::<BTreeSet<_>>()); + + // Remove items from the map using k1 = 0 + let _ = FooDoubleMap::clear_prefix(0, u32::max_value(), None); + // Values associated with (0, _) should have been removed + assert_eq!(FooDoubleMap::iter_prefix(0).collect::<Vec<_>>(), vec![]); + }); + } } diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index 7f936a8a35a6..81a3dd270d81 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -31,19 +31,45 @@ use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_std::prelude::*; -/// A type that allow to store value for given key. Allowing to insert/remove/iterate on values. +/// A type representing a *map* in storage. A *storage map* is a mapping of keys to values of a +/// given type stored on-chain. /// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key)) -/// ``` +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Example /// -/// # Warning +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo<T> = StorageMap< +/// _, +/// Blake2_128Concat, +/// u32, +/// u32, +/// ValueQuery +/// >; /// -/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as -/// `blake2_128_concat` must be used. Otherwise, other values in storage can be compromised. +/// /// Alternative named syntax.
+/// #[pallet::storage] +/// pub type Bar<T> = StorageMap< +/// Hasher = Blake2_128Concat, +/// Key = u32, +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } +/// ``` pub struct StorageMap< Prefix, Hasher, diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index c7f2557099b3..1d995d93e882 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -43,13 +43,17 @@ pub use value::StorageValue; /// Trait implementing how the storage optional value is converted into the queried type. /// -/// It is implemented by: -/// * `OptionQuery` which converts an optional value to an optional value, used when querying +/// It is implemented most notably by: +/// +/// * [`OptionQuery`] which converts an optional value to an optional value, used when querying /// storage returns an optional value. -/// * `ResultQuery` which converts an optional value to a result value, used when querying storage +/// * [`ResultQuery`] which converts an optional value to a result value, used when querying storage /// returns a result value. -/// * `ValueQuery` which converts an optional value to a value, used when querying storage returns a -/// value. +/// * [`ValueQuery`] which converts an optional value to a value, used when querying storage returns +/// a value. +/// +/// ## Example +#[doc = docify::embed!("src/storage/types/mod.rs", value_query_examples)] pub trait QueryKindTrait<Value, OnEmpty> { /// Metadata for the storage kind. const METADATA: StorageEntryModifierIR; @@ -65,11 +69,10 @@ pub trait QueryKindTrait<Value, OnEmpty> { fn from_query_to_optional_value(v: Self::Query) -> Option<Value>; } -/// Implement QueryKindTrait with query being `Option<Value>` +/// Implements [`QueryKindTrait`] with `Query` type being `Option<_>`. /// -/// NOTE: it doesn't support a generic `OnEmpty`. This means only `None` can be -/// returned when no value is found. To use another `OnEmpty` implementation, `ValueQuery` can be -/// used instead. +/// NOTE: it doesn't support a generic `OnEmpty`. This means only `None` can be returned when no +/// value is found. To use another `OnEmpty` implementation, `ValueQuery` can be used instead. pub struct OptionQuery; impl<Value> QueryKindTrait<Value, GetDefault> for OptionQuery where @@ -89,7 +92,7 @@ where } } -/// Implement QueryKindTrait with query being `Result<Value, Error>` +/// Implements [`QueryKindTrait`] with `Query` type being `Result<Value, Error>`. pub struct ResultQuery<Error>(sp_std::marker::PhantomData<Error>); impl<Value, Error> QueryKindTrait<Value, Error> for ResultQuery<Error> where @@ -113,7 +116,7 @@ where } } -/// Implement QueryKindTrait with query being `Value` +/// Implements [`QueryKindTrait`] with `Query` type being `Value`. pub struct ValueQuery; impl<Value, OnEmpty> QueryKindTrait<Value, OnEmpty> for ValueQuery where @@ -140,3 +143,60 @@ pub trait StorageEntryMetadataBuilder { /// Build into `entries` the storage metadata entries of a storage given some `docs`. fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec<StorageEntryMetadataIR>); } + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + storage::types::ValueQuery, + traits::{Get, StorageInstance}, + }; + use sp_io::TestExternalities; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + #[docify::export] + #[test] + pub fn value_query_examples() { + /// Custom default impl to be used with `ValueQuery`. + struct UniverseSecret; + impl Get<u32> for UniverseSecret { + fn get() -> u32 { + 42 + } + } + + /// Custom default impl to be used with `ResultQuery`.
+ struct GetDefaultForResult; + impl Get<Result<u32, ()>> for GetDefaultForResult { + fn get() -> Result<u32, ()> { + Err(()) + } + } + + type A = StorageValue<Prefix, u32, ValueQuery>; + type B = StorageValue<Prefix, u32, OptionQuery>; + type C = StorageValue<Prefix, u32, ResultQuery<()>, GetDefaultForResult>; + type D = StorageValue<Prefix, u32, ValueQuery, UniverseSecret>; + + TestExternalities::default().execute_with(|| { + // normal value query returns default + assert_eq!(A::get(), 0); + + // option query returns none + assert_eq!(B::get(), None); + + // result query returns error + assert_eq!(C::get(), Err(())); + + // value query with custom onempty returns 42 + assert_eq!(D::get(), 42); + }); + } +} diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 406fd42eaf7b..0723db689002 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -33,24 +33,54 @@ use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_runtime::SaturatedConversion; use sp_std::prelude::*; -/// A type that allow to store values for an arbitrary number of keys in the form of -/// `(Key, Key, ..., Key)`. +/// A type representing an *NMap* in storage. This structure associates an arbitrary number of keys +/// with a value of a specified type stored on-chain. /// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key1)) -/// ++ Hasher2(encode(key2)) -/// ++ ... -/// ++ HasherN(encode(keyN)) -/// ``` +/// For example, [`StorageDoubleMap`](frame_support::storage::types::StorageDoubleMap) is a special +/// case of an *NMap* with N = 2. +/// +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. /// -/// # Warning +/// # Example /// -/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` -/// such as `blake2_128_concat` must be used for the key hashers. Otherwise, other values -/// in storage can be compromised. +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageNMap, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo<T> = StorageNMap< +/// _, +/// ( +/// NMapKey<Blake2_128Concat, u8>, +/// NMapKey<Identity, u16>, +/// NMapKey<Twox64Concat, u32> +/// ), +/// u64, +/// ValueQuery, +/// >; +/// +/// /// Named alternative syntax. +/// #[pallet::storage] +/// pub type Bar<T> = StorageNMap< +/// Key = ( +/// NMapKey<Blake2_128Concat, u8>, +/// NMapKey<Identity, u16>, +/// NMapKey<Twox64Concat, u32> +/// ), +/// Value = u64, +/// QueryKind = ValueQuery, +/// >; +/// } +/// ``` pub struct StorageNMap< Prefix, Key, diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index 3e1f2fe9551d..9fff1774d7b4 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -30,11 +30,36 @@ use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_std::prelude::*; -/// A type that allow to store a value. +/// A type representing a *value* in storage. A *storage value* is a single value of a given type +/// stored on-chain.
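Since `value.rs` gains declaration examples but no usage walkthrough, a short sketch of the basic operations may be useful here; the `MyVal` alias and `Prefix` instance are illustrative, mirroring the test helpers elsewhere in this patch:

```rust
use frame_support::{
    storage::types::{StorageValue, ValueQuery},
    traits::StorageInstance,
};
use sp_io::TestExternalities;

struct Prefix;
impl StorageInstance for Prefix {
    fn pallet_prefix() -> &'static str { "test" }
    const STORAGE_PREFIX: &'static str = "foo";
}

type MyVal = StorageValue<Prefix, u32, ValueQuery>;

fn main() {
    TestExternalities::default().execute_with(|| {
        MyVal::put(7);
        assert_eq!(MyVal::get(), 7);
        // `mutate` reads, applies the closure and writes back in one call.
        MyVal::mutate(|v| *v += 1);
        assert_eq!(MyVal::get(), 8);
        // After `kill`, `ValueQuery` falls back to the `OnEmpty` default.
        MyVal::kill();
        assert_eq!(MyVal::get(), 0);
    });
}
```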
/// -/// Each value is stored at: -/// ```nocompile -/// Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) +/// For general information regarding the `#[pallet::storage]` attribute, refer to +/// [`crate::pallet_macros::storage`]. +/// +/// # Example +/// +/// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// # use frame_support::pallet_prelude::*; +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// /// A kitchen-sink StorageValue, with all possible additional attributes. +/// #[pallet::storage] +/// #[pallet::getter(fn foo)] +/// #[pallet::storage_prefix = "OtherFoo"] +/// #[pallet::unbounded] +/// pub type Foo = StorageValue<_, u32,ValueQuery>; +/// +/// /// Named alternative syntax. +/// #[pallet::storage] +/// pub type Bar = StorageValue< +/// Value = u32, +/// QueryKind = ValueQuery +/// >; +/// } /// ``` pub struct StorageValue( core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)>, From ac9bf49411b9de4545ad4ef97d696a74bd536871 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 17:43:00 +0200 Subject: [PATCH 63/69] xcm: MultiLocation::chain_location() takes nonmut reference --- polkadot/xcm/src/v3/multilocation.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index f948340abcb5..a685c15c7d98 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -446,17 +446,18 @@ impl MultiLocation { } /// Return the MultiLocation subsection identifying the chain that `self` points to. - pub fn chain_location(mut self) -> MultiLocation { + pub fn chain_location(&self) -> MultiLocation { + let mut clone = self.clone(); // start popping junctions until we reach chain identifier - while let Some(j) = self.last() { + while let Some(j) = clone.last() { if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { // return chain subsection - return self + return clone } else { - (self, _) = self.split_last_interior(); + (clone, _) = clone.split_last_interior(); } } - MultiLocation::new(self.parents, Junctions::Here) + MultiLocation::new(clone.parents, Junctions::Here) } } From e85e3390609ee69bda7316c03bbe0622042fda7a Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 18:01:32 +0200 Subject: [PATCH 64/69] pallet-xcm: benchmarks: enforce single asset transfer at the api level --- .../assets/asset-hub-kusama/src/lib.rs | 8 ++--- .../assets/asset-hub-polkadot/src/lib.rs | 8 ++--- .../assets/asset-hub-rococo/src/lib.rs | 8 ++--- .../assets/asset-hub-westend/src/lib.rs | 8 ++--- .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 6 ++-- .../bridge-hub-polkadot/src/lib.rs | 6 ++-- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 6 ++-- .../collectives-polkadot/src/lib.rs | 4 +-- .../contracts/contracts-rococo/src/lib.rs | 6 ++-- polkadot/runtime/rococo/src/lib.rs | 8 ++--- polkadot/runtime/westend/src/lib.rs | 11 +++---- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 30 +++++++------------ polkadot/xcm/pallet-xcm/src/mock.rs | 8 ++--- 13 files changed, 52 insertions(+), 65 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 25d5712f4dfb..e5261cf06b63 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -1217,18 
+1217,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between AH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // AH can reserve transfer native token to some random parachain. let random_para_id = 43211234; ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( @@ -1238,7 +1238,7 @@ impl_runtime_apis! { MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, ParentThen(Parachain(random_para_id).into()).into(), )) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index dd111355c02b..99898c1a5b44 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -1096,18 +1096,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between AH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // AH can reserve transfer native token to some random parachain. let random_para_id = 43211234; ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( @@ -1117,7 +1117,7 @@ impl_runtime_apis! { MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, ParentThen(Parachain(random_para_id).into()).into(), )) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 1f54c3ca997b..06787c9e1e7e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1305,18 +1305,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between AH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // AH can reserve transfer native token to some random parachain. let random_para_id = 43211234; ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( @@ -1326,7 +1326,7 @@ impl_runtime_apis! 
{ MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, ParentThen(Parachain(random_para_id).into()).into(), )) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index cc04db22785b..e23b9b36e12d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1281,18 +1281,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between AH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // AH can reserve transfer native token to some random parachain. let random_para_id = 43211234; ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( @@ -1302,7 +1302,7 @@ impl_runtime_apis! { MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, ParentThen(Parachain(random_para_id).into()).into(), )) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index 7ff1e1cdb356..ba4d373de3b0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -678,18 +678,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between BH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Reserve transfers are disabled on BH. None } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 39238689d458..18269d713c45 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -678,18 +678,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between BH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Reserve transfers are disabled on BH. 
None } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 77f6d8cdd1ea..4914554227a9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -867,18 +867,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between BH and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Reserve transfers are disabled on BH. None } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index 258ecd54f901..3fc2215c7658 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -943,7 +943,7 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between Collectives and Relay. Some(( MultiAsset { @@ -954,7 +954,7 @@ impl_runtime_apis! { )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Reserve transfers are disabled on Collectives. None } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 4353d209fe21..958ee94b5ac1 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -687,18 +687,18 @@ impl_runtime_apis! { Some(Parent.into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported between Contracts-System-Para and Relay. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Parent.into()) - }.into(), + }, Parent.into(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Reserve transfers are disabled on Contracts-System-Para. None } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 6171576d30c0..643cde530078 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2089,24 +2089,24 @@ sp_api::impl_runtime_apis! { Some(crate::xcm_config::AssetHub::get()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported to/from AH. 
Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) - }.into(), + }, crate::xcm_config::AssetHub::get(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay can reserve transfer native token to some random parachain. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) - }.into(), + }, Parachain(43211234).into(), )) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 79145855d71a..47454dae0ad6 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2171,24 +2171,21 @@ sp_api::impl_runtime_apis! { Some(crate::xcm_config::AssetHub::get()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay/native token can be teleported to/from AH. Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Here.into()) - }.into(), + MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }, crate::xcm_config::AssetHub::get(), )) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { // Relay can reserve transfer native token to some random parachain. Some(( MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) - }.into(), + }, crate::Junction::Parachain(43211234).into(), )) } diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index ab2941b10de3..fb3929bfa8e3 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -39,23 +39,23 @@ pub trait Config: crate::Config { None } - /// A `(MultiAssets, MultiLocation)` pair representing assets and the destination they can - /// be teleported to. Used only in benchmarks. + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// teleported to. Used only in benchmarks. /// /// Implementation should also make sure `dest` is reachable/connected. /// /// If `None`, the benchmarks that depend on this will be skipped. - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { None } - /// A `(MultiAssets, MultiLocation)` pair representing assets and the destination they can - /// be reserve-transferred to. Used only in benchmarks. + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// reserve-transferred to. Used only in benchmarks. /// /// Implementation should also make sure `dest` is reachable/connected. /// /// If `None`, the benchmarks that depend on this will be skipped. - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { None } } @@ -81,20 +81,15 @@ benchmarks! 
{ }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { - let (assets, destination) = T::teleportable_assets_and_dest().ok_or( + let (asset, destination) = T::teleportable_asset_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - // most chains deploying `pallet-xcm` don't have `pallet-assets` so we're - // stuck with using native token and `pallet-balances`. - if assets.len() != 1 { - return Err(BenchmarkError::Stop("Generic benchmark supports only single native asset")) - } - let asset = assets.inner().clone().pop().unwrap(); let transferred_amount = match &asset.fun { Fungible(amount) => *amount, _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), }.into(); + let assets: MultiAssets = asset.into(); let existential_deposit = T::ExistentialDeposit::get(); let caller = whitelisted_caller(); @@ -125,20 +120,15 @@ benchmarks! { } reserve_transfer_assets { - let (assets, destination) = T::reserve_transferable_assets_and_dest().ok_or( + let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - // most chains deploying `pallet-xcm` don't have `pallet-assets` so we're - // stuck with using native token and `pallet-balances`. - if assets.len() != 1 { - return Err(BenchmarkError::Stop("Generic benchmark supports only single native asset")) - } - let asset = assets.inner().clone().pop().unwrap(); let transferred_amount = match &asset.fun { Fungible(amount) => *amount, _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), }.into(); + let assets: MultiAssets = asset.into(); let existential_deposit = T::ExistentialDeposit::get(); let caller = whitelisted_caller(); diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index b88734b9abfb..c018ef723fe4 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -538,13 +538,13 @@ impl super::benchmarking::Config for Test { Some(Parachain(1000).into()) } - fn teleportable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { - Some((NativeAsset::get().into(), SystemParachainLocation::get())) + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some((NativeAsset::get(), SystemParachainLocation::get())) } - fn reserve_transferable_assets_and_dest() -> Option<(MultiAssets, MultiLocation)> { + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { Some(( - MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }.into(), + MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }, Parachain(OTHER_PARA_ID).into(), )) } From 9987bbb1d264672d406071fb2e164e4e80117e13 Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Wed, 1 Nov 2023 13:11:35 -0300 Subject: [PATCH 65/69] Remove transitional code wrt executor parameters (#2112) --- polkadot/runtime/parachains/src/runtime_api_impl/v7.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 35d92f71084f..4d0bbc6a8960 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -370,15 +370,7 @@ pub fn get_session_disputes( pub fn session_executor_params( session_index: SessionIndex, ) -> Option { - // This is to bootstrap the 
storage working around the runtime migration issue: - // https://github.com/paritytech/substrate/issues/9997 - // After the bootstrap is complete (no less than 7 session passed with the runtime) - // this code should be replaced with a pure - // >::session_executor_params(session_index) call. - match >::session_executor_params(session_index) { - Some(ep) => Some(ep), - None => Some(ExecutorParams::default()), - } + >::session_executor_params(session_index) } /// Implementation of `unapplied_slashes` runtime API From 9f7538dc333ed18472cef0f6742cc354a07d73bf Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 18:51:48 +0200 Subject: [PATCH 66/69] xcm-executor: rename AssetTransferSupport to XcmAssetTransfer --- polkadot/xcm/pallet-xcm/src/lib.rs | 8 ++++---- polkadot/xcm/xcm-executor/src/lib.rs | 4 ++-- polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs | 2 +- polkadot/xcm/xcm-executor/src/traits/mod.rs | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 3af0a48b2440..a89baedb7f4c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -42,7 +42,7 @@ use sp_runtime::{ use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; use xcm_executor::traits::{ - AssetTransferError, AssetTransferSupport, ConvertOrigin, Properties, TransferType, + AssetTransferError, ConvertOrigin, Properties, TransferType, XcmAssetTransfers, }; use frame_support::{ @@ -208,7 +208,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall> + AssetTransferSupport; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; @@ -1635,13 +1635,13 @@ impl Pallet { // be in error, there would need to be an accounting violation by ourselves, // so it's unlikely, but we don't want to allow that kind of bug to leak into // a trusted chain. - ::AssetTransactor::can_check_out( + ::AssetTransactor::can_check_out( &dest, &fees, &dummy_context, ) .map_err(|_| Error::::CannotCheckOutTeleport)?; - ::AssetTransactor::check_out( + ::AssetTransactor::check_out( &dest, &fees, &dummy_context, diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index b8e4c53c9889..53a2620a37be 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -38,7 +38,7 @@ use traits::{ mod assets; pub use assets::Assets; mod config; -use crate::traits::AssetTransferSupport; +use crate::traits::XcmAssetTransfers; pub use config::Config; /// A struct to specify how fees are being paid. 
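The `AssetTransferSupport` to `XcmAssetTransfers` rename is purely nominal: the associated types and their bounds are unchanged, so downstream code only swaps the trait name. A sketch of a consumer-side bound (the helper function is illustrative, not from this patch):

```rust
use frame_support::traits::ContainsPair;
use xcm::latest::prelude::*;
use xcm_executor::traits::XcmAssetTransfers;

// Before this commit the bound would have read `AssetTransferSupport`.
fn is_teleport_trusted<T: XcmAssetTransfers>(asset: &MultiAsset, dest: &MultiLocation) -> bool {
    T::IsTeleporter::contains(asset, dest)
}
```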
@@ -255,7 +255,7 @@ impl ExecuteXcm for XcmExecutor AssetTransferSupport for XcmExecutor { +impl XcmAssetTransfers for XcmExecutor { type IsReserve = Config::IsReserve; type IsTeleporter = Config::IsTeleporter; type AssetTransactor = Config::AssetTransactor; diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs index 980554f2dce2..5fdc9b15e015 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -44,7 +44,7 @@ pub enum TransferType { /// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` /// configurations. -pub trait AssetTransferSupport { +pub trait XcmAssetTransfers { /// Combinations of (Asset, Location) pairs which we trust as reserves. Meaning /// reserve-based-transfers are to be used for assets matching this filter. type IsReserve: ContainsPair; diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index 1723da0c3f40..71e75c77e939 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -25,7 +25,7 @@ pub use asset_exchange::AssetExchange; mod asset_lock; pub use asset_lock::{AssetLock, Enact, LockError}; mod asset_transfer; -pub use asset_transfer::{AssetTransferSupport, Error as AssetTransferError, TransferType}; +pub use asset_transfer::{Error as AssetTransferError, TransferType, XcmAssetTransfers}; mod export; pub use export::{export_xcm, validate_export, ExportXcm}; mod fee_manager; From 45279ccce23e3dbf3ee0dd0b04ddf7a1545ce3f2 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 1 Nov 2023 20:19:12 +0200 Subject: [PATCH 67/69] clippy --- polkadot/xcm/src/v3/multilocation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index a685c15c7d98..89e259844438 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -447,7 +447,7 @@ impl MultiLocation { /// Return the MultiLocation subsection identifying the chain that `self` points to. 
pub fn chain_location(&self) -> MultiLocation { - let mut clone = self.clone(); + let mut clone = *self; // start popping junctions until we reach chain identifier while let Some(j) = clone.last() { if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { From ebe8bafac6de7e256f6f40c1b9e662d45d4c38d4 Mon Sep 17 00:00:00 2001 From: Alistair Singh Date: Wed, 1 Nov 2023 21:33:07 +0200 Subject: [PATCH 68/69] fixes --- .../bridge-hub-rococo/src/xcm_config.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 6a074bf237f6..bec3d5b0c39c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -58,8 +58,9 @@ use xcm::latest::prelude::*; use xcm_builder::{ deposit_or_burn_fee, AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, HandleFee, - IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, + DescribeFamily, EnsureXcmOrigin, HandleFee, HashedDescription, IsConcrete, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, @@ -490,6 +491,17 @@ impl ExportXcm for BridgeHubRococoOrBridgeHubWococoSwitchExporter { } } +pub struct AllowSiblingsOnly; +impl Contains for AllowSiblingsOnly { + fn contains(location: &MultiLocation) -> bool { + if let MultiLocation { parents: 1, interior: X1(Parachain(_)) } = location { + true + } else { + false + } + } +} + /// A `HandleFee` implementation that simply deposits the fees for `ExportMessage` XCM instructions /// into the accounts that are used for paying the relayer rewards. /// Burns the fees in case of a failure. 
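As an aside on the filter added above (which the next commit deduplicates): the `if let ... { true } else { false }` body can be collapsed with `matches!`, with identical behavior. A sketch:

```rust
use frame_support::traits::Contains;
use xcm::latest::prelude::*;

pub struct AllowSiblingsOnly;
impl Contains<MultiLocation> for AllowSiblingsOnly {
    // Admit only locations of the form `../Parachain(_)`, i.e. sibling parachains.
    fn contains(location: &MultiLocation) -> bool {
        matches!(location, MultiLocation { parents: 1, interior: X1(Parachain(_)) })
    }
}
```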
From 2dd39ade8b332fff320c31f0dc886a734c6d9c97 Mon Sep 17 00:00:00 2001 From: Alistair Singh Date: Wed, 1 Nov 2023 21:50:21 +0200 Subject: [PATCH 69/69] remove duplicated trait --- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 3 +-- .../bridge-hubs/bridge-hub-rococo/src/xcm_config.rs | 11 ----------- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index ac4c822c0480..a5a9582d1a04 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -99,7 +99,6 @@ use xcm::latest::prelude::*; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; #[cfg(not(feature = "runtime-benchmarks"))] -use crate::xcm_config::AllowSiblingsOnly; use crate::{ bridge_hub_rococo_config::BridgeRefundBridgeHubWococoMessages, bridge_hub_wococo_config::BridgeRefundBridgeHubRococoMessages, xcm_config::XcmRouter, @@ -620,7 +619,7 @@ impl snowbridge_control::Config for Runtime { type OwnParaId = ParachainInfo; type OutboundQueue = EthereumOutboundQueue; type MessageHasher = BlakeTwo256; - type SiblingOrigin = EnsureXcm; + type SiblingOrigin = EnsureXcm; type AgentIdOf = xcm_config::AgentIdOf; type TreasuryAccount = TreasuryAccount; type Token = Balances; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index bec3d5b0c39c..5571432fc030 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -491,17 +491,6 @@ impl ExportXcm for BridgeHubRococoOrBridgeHubWococoSwitchExporter { } } -pub struct AllowSiblingsOnly; -impl Contains for AllowSiblingsOnly { - fn contains(location: &MultiLocation) -> bool { - if let MultiLocation { parents: 1, interior: X1(Parachain(_)) } = location { - true - } else { - false - } - } -} - /// A `HandleFee` implementation that simply deposits the fees for `ExportMessage` XCM instructions /// into the accounts that are used for paying the relayer rewards. /// Burns the fees in case of a failure.