From 24248d96310954b2d52d137fb160bbc443236e7a Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Thu, 9 Jan 2025 17:58:19 +0100
Subject: [PATCH] Add multisig migration

Signed-off-by: Oliver Tale-Yazdi
---
 Cargo.lock                               |   4 +
 Cargo.toml                               |   1 +
 integration-tests/ahm/Cargo.toml         |   1 +
 integration-tests/ahm/src/mock.rs        |  17 ++
 integration-tests/ahm/src/tests.rs       |  35 +--
 pallets/ah-migrator/Cargo.toml           |   5 +
 pallets/ah-migrator/src/lib.rs           |  52 ++++-
 pallets/ah-migrator/src/multisig.rs      |  54 +++++
 pallets/rc-migrator/Cargo.toml           |   7 +-
 pallets/rc-migrator/src/accounts.rs      |  27 +--
 pallets/rc-migrator/src/lib.rs           | 196 ++++++++++++-----
 pallets/rc-migrator/src/multisig.rs      | 206 ++++++++++++++++++
 pallets/rc-migrator/src/types.rs         |  15 ++
 .../asset-hub-polkadot/src/migration.rs  |   3 +-
 14 files changed, 506 insertions(+), 117 deletions(-)
 create mode 100644 pallets/ah-migrator/src/multisig.rs
 create mode 100644 pallets/rc-migrator/src/multisig.rs

diff --git a/Cargo.lock b/Cargo.lock
index fbad58d4f8..cb2cf0a135 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7630,8 +7630,10 @@ dependencies = [
  "frame-benchmarking",
  "frame-support",
  "frame-system",
+ "hex-literal",
  "log",
  "pallet-balances",
+ "pallet-multisig",
  "pallet-nomination-pools",
  "pallet-preimage",
  "pallet-rc-migrator",
@@ -8856,6 +8858,7 @@ dependencies = [
  "frame-system",
  "log",
  "pallet-balances",
+ "pallet-multisig",
  "pallet-staking",
  "parity-scale-codec",
  "polkadot-parachain-primitives",
@@ -10038,6 +10041,7 @@ dependencies = [
  "sp-core 34.0.0",
  "sp-io 38.0.0",
  "sp-runtime 39.0.2",
+ "sp-state-machine 0.43.0",
  "sp-storage 21.0.0",
  "sp-tracing 17.0.1",
  "tokio",
diff --git a/Cargo.toml b/Cargo.toml
index 889d1a0b40..521dd9be58 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -231,6 +231,7 @@ sp-offchain = { version = "34.0.0", default-features = false }
 sp-runtime = { version = "39.0.1", default-features = false }
 sp-session = { version = "36.0.0", default-features = false }
 sp-staking = { version = "36.0.0", default-features = false }
+sp-state-machine = { version = "0.43.0", default-features = false }
 sp-std = { version = "14.0.0", default-features = false }
 sp-storage = { version = "21.0.0", default-features = false }
 sp-tracing = { version = "17.0.1", default-features = false }
diff --git a/integration-tests/ahm/Cargo.toml b/integration-tests/ahm/Cargo.toml
index a4da071fc0..ddb80350c0 100644
--- a/integration-tests/ahm/Cargo.toml
+++ b/integration-tests/ahm/Cargo.toml
@@ -32,3 +32,4 @@ sp-runtime = { workspace = true, default-features = true }
 sp-storage = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 tokio = { features = ["full", "macros"], workspace = true }
+sp-state-machine = { workspace = true, default-features = true }
diff --git a/integration-tests/ahm/src/mock.rs b/integration-tests/ahm/src/mock.rs
index 529cef3f10..e551aec215 100644
--- a/integration-tests/ahm/src/mock.rs
+++ b/integration-tests/ahm/src/mock.rs
@@ -15,8 +15,11 @@
 // along with Polkadot. If not, see .
use asset_hub_polkadot_runtime::Block as AssetHubBlock; +use cumulus_primitives_core::{AggregateMessageOrigin, InboundDownwardMessage}; +use frame_support::traits::EnqueueMessage; use polkadot_runtime::Block as PolkadotBlock; use remote_externalities::{Builder, Mode, OfflineConfig, RemoteExternalities}; +use sp_runtime::BoundedVec; const LOG_RC: &str = "runtime::relay"; const LOG_AH: &str = "runtime::asset-hub"; @@ -73,3 +76,17 @@ pub fn next_block_ah() { frame_system::Pallet::::reset_events(); >::on_initialize(now + 1); } + +/// Enqueue DMP messages on the parachain side. +/// +/// This bypasses `set_validation_data` and `enqueue_inbound_downward_messages` by just directly +/// enqueuing them. +pub fn enqueue_dmp(msgs: Vec) { + for msg in msgs { + let bounded_msg: BoundedVec = msg.msg.try_into().expect("DMP message too big"); + asset_hub_polkadot_runtime::MessageQueue::enqueue_message( + bounded_msg.as_bounded_slice(), + AggregateMessageOrigin::Parent, + ); + } +} diff --git a/integration-tests/ahm/src/tests.rs b/integration-tests/ahm/src/tests.rs index fdeec6a55a..511fe5e433 100644 --- a/integration-tests/ahm/src/tests.rs +++ b/integration-tests/ahm/src/tests.rs @@ -33,13 +33,10 @@ use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{pallet_prelude::*, traits::*, weights::WeightMeter}; -use pallet_rc_migrator::RcMigrationStage; +use pallet_rc_migrator::{MigrationStage, RcMigrationStage}; use polkadot_primitives::InboundDownwardMessage; -use polkadot_runtime::RcMigrator; -use sp_core::H256; -use sp_io::TestExternalities; -use sp_storage::StateVersion; -use std::cell::OnceCell; +use remote_externalities::RemoteExternalities; +use tokio::sync::mpsc::channel; use asset_hub_polkadot_runtime::{Block as AssetHubBlock, Runtime as AssetHub}; use polkadot_runtime::{Block as PolkadotBlock, Runtime as Polkadot}; @@ -61,13 +58,15 @@ async fn account_migration_works() { let new_dmps = runtime_parachains::dmp::DownwardMessageQueues::::take(para_id); - if new_dmps.is_empty() && !dmps.is_empty() { - break; - } dmps.extend(new_dmps); - } - dmps + if RcMigrationStage::::get() == + pallet_rc_migrator::MigrationStage::MultisigMigrationDone + { + log::info!("Multisig migration done"); + break dmps; + } + } }); rc.commit_all().unwrap(); log::info!("Num of RC->AH DMP messages: {}", dmp_messages.len()); @@ -95,17 +94,3 @@ async fn account_migration_works() { // some overweight ones. }); } - -/// Enqueue DMP messages on the parachain side. -/// -/// This bypasses `set_validation_data` and `enqueue_inbound_downward_messages` by just directly -/// enqueuing them. 
-fn enqueue_dmp(msgs: Vec) { - for msg in msgs { - let bounded_msg: BoundedVec = msg.msg.try_into().expect("DMP message too big"); - asset_hub_polkadot_runtime::MessageQueue::enqueue_message( - bounded_msg.as_bounded_slice(), - AggregateMessageOrigin::Parent, - ); - } -} diff --git a/pallets/ah-migrator/Cargo.toml b/pallets/ah-migrator/Cargo.toml index 491d55cb78..a3f0cc9155 100644 --- a/pallets/ah-migrator/Cargo.toml +++ b/pallets/ah-migrator/Cargo.toml @@ -14,6 +14,7 @@ frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } pallet-balances = { workspace = true } +pallet-multisig = { workspace = true } pallet-nomination-pools = { workspace = true } pallet-preimage = { workspace = true } pallet-rc-migrator = { workspace = true } @@ -29,6 +30,7 @@ sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +hex-literal = { workspace = true } [features] default = ["std"] @@ -54,6 +56,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "pallet-multisig/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -69,6 +72,7 @@ runtime-benchmarks = [ "polkadot-runtime-common/runtime-benchmarks", "runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime", @@ -82,4 +86,5 @@ try-runtime = [ "polkadot-runtime-common/try-runtime", "runtime-parachains/try-runtime", "sp-runtime/try-runtime", + "pallet-multisig/try-runtime" ] diff --git a/pallets/ah-migrator/src/lib.rs b/pallets/ah-migrator/src/lib.rs index 467b115b6a..f6551bd195 100644 --- a/pallets/ah-migrator/src/lib.rs +++ b/pallets/ah-migrator/src/lib.rs @@ -31,6 +31,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +pub mod multisig; pub mod types; pub use pallet::*; @@ -44,7 +45,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use pallet_balances::{AccountData, Reasons as LockReasons}; -use pallet_rc_migrator::accounts::Account as RcAccount; +use pallet_rc_migrator::{accounts::Account as RcAccount, multisig::*}; use sp_application_crypto::Ss58Codec; use sp_runtime::{traits::Convert, AccountId32}; use sp_std::prelude::*; @@ -62,6 +63,7 @@ pub mod pallet { pub trait Config: frame_system::Config, AccountId = AccountId32> + pallet_balances::Config + + pallet_multisig::Config { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -91,10 +93,21 @@ pub mod pallet { } #[pallet::event] - //#[pallet::generate_deposit(pub(crate) fn deposit_event)] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] pub enum Event { /// TODO TODO, + /// We received a batch of multisigs that we are going to integrate. + MultisigBatchReceived { + /// How many multisigs are in the batch. + count: u32, + }, + MultisigBatchProcessed { + /// How many multisigs were successfully integrated. + count_good: u32, + /// How many multisigs failed to integrate. + count_bad: u32, + }, } #[pallet::pallet] @@ -109,7 +122,6 @@ pub mod pallet { /// /// The accounts that sent with `pallet_rc_migrator::Pallet::migrate_accounts` function. #[pallet::call_index(0)] - #[pallet::weight({1})] pub fn receive_accounts( origin: OriginFor, accounts: Vec>, @@ -121,13 +133,28 @@ pub mod pallet { Ok(()) } + + /// Receive multisigs from the Relay Chain. + /// + /// This will be called from an XCM `Transact` inside a DMP from the relay chain. 
The + /// multisigs were prepared by + /// `pallet_rc_migrator::multisig::MultisigMigrator::migrate_many`. + #[pallet::call_index(1)] + pub fn receive_multisigs( + origin: OriginFor, + accounts: Vec>, + ) -> DispatchResult { + ensure_root(origin)?; + + Self::do_receive_multisigs(accounts).map_err(Into::into) + } } impl Pallet { fn do_receive_accounts( accounts: Vec>, ) -> Result<(), Error> { - log::debug!(target: LOG_TARGET, "Integrating {} accounts", accounts.len()); + log::info!(target: LOG_TARGET, "Integrating {} accounts", accounts.len()); for account in accounts { let _: Result<(), ()> = with_transaction_opaque_err::<(), (), _>(|| { @@ -148,7 +175,7 @@ pub mod pallet { ) -> Result<(), Error> { let who = account.who; let total_balance = account.free + account.reserved; - let minted = match T::Currency::mint_into(&who, total_balance) { + let minted = match ::Currency::mint_into(&who, total_balance) { Ok(minted) => minted, Err(e) => { log::error!(target: LOG_TARGET, "Failed to mint into account {}: {:?}", who.to_ss58check(), e); @@ -158,21 +185,24 @@ pub mod pallet { debug_assert!(minted == total_balance); for hold in account.holds { - if let Err(e) = - T::Currency::hold(&T::RcToAhHoldReason::convert(hold.id), &who, hold.amount) - { + if let Err(e) = ::Currency::hold( + &T::RcToAhHoldReason::convert(hold.id), + &who, + hold.amount, + ) { log::error!(target: LOG_TARGET, "Failed to hold into account {}: {:?}", who.to_ss58check(), e); return Err(Error::::TODO); } } - if let Err(e) = T::Currency::reserve(&who, account.unnamed_reserve) { + if let Err(e) = ::Currency::reserve(&who, account.unnamed_reserve) + { log::error!(target: LOG_TARGET, "Failed to reserve into account {}: {:?}", who.to_ss58check(), e); return Err(Error::::TODO); } for freeze in account.freezes { - if let Err(e) = T::Currency::set_freeze( + if let Err(e) = ::Currency::set_freeze( &T::RcToAhFreezeReason::convert(freeze.id), &who, freeze.amount, @@ -183,7 +213,7 @@ pub mod pallet { } for lock in account.locks { - T::Currency::set_lock( + ::Currency::set_lock( lock.id, &who, lock.amount, diff --git a/pallets/ah-migrator/src/multisig.rs b/pallets/ah-migrator/src/multisig.rs new file mode 100644 index 0000000000..6a05aa0629 --- /dev/null +++ b/pallets/ah-migrator/src/multisig.rs @@ -0,0 +1,54 @@ +use crate::{types::*, *}; +use hex_literal::hex; + +/// These multisigs have historical issues where the deposit is missing for the creator. 
+const KNOWN_BAD_MULTISIGS: &[AccountId32] = &[ + AccountId32::new(hex!("e64d5c0de81b9c960c1dd900ad2a5d9d91c8a683e60dd1308e6bc7f80ea3b25f")), + AccountId32::new(hex!("d55ec415b6703ddf7bec9d5c02a0b642f1f5bd068c6b3c50c2145544046f1491")), + AccountId32::new(hex!("c2ff4f84b7fcee1fb04b4a97800e72321a4bc9939d456ad48d971127fc661c48")), + AccountId32::new(hex!("0a8933d3f2164648399cc48cb8bb8c915abb94a2164c40ad6b48cee005f1cb6e")), + AccountId32::new(hex!("ebe3cd53e580c4cd88acec1c952585b50a44a9b697d375ff648fee582ae39d38")), + AccountId32::new(hex!("e64d5c0de81b9c960c1dd900ad2a5d9d91c8a683e60dd1308e6bc7f80ea3b25f")), + AccountId32::new(hex!("caafae0aaa6333fcf4dc193146945fe8e4da74aa6c16d481eef0ca35b8279d73")), + AccountId32::new(hex!("d429458e57ba6e9b21688441ff292c7cf82700550446b061a6c5dec306e1ef05")), +]; + +impl Pallet { + pub fn do_receive_multisigs(multisigs: Vec>) -> Result<(), Error> { + Self::deposit_event(Event::MultisigBatchReceived { count: multisigs.len() as u32 }); + let (mut count_good, mut count_bad) = (0, 0); + log::info!(target: LOG_TARGET, "Integrating {} multisigs", multisigs.len()); + + for multisig in multisigs { + match Self::do_receive_multisig(multisig) { + Ok(()) => count_good += 1, + Err(e) => { + count_bad += 1; + log::error!(target: LOG_TARGET, "Error while integrating multisig: {:?}", e); + }, + } + } + Self::deposit_event(Event::MultisigBatchProcessed { count_good, count_bad }); + + Ok(()) + } + + pub fn do_receive_multisig(multisig: RcMultisigOf) -> Result<(), Error> { + log::debug!(target: LOG_TARGET, "Integrating multisig {}, {:?}", multisig.creator.to_ss58check(), multisig.deposit); + + let missing = ::Currency::unreserve( + &multisig.creator, + multisig.deposit, + ); + if missing != Default::default() { + if KNOWN_BAD_MULTISIGS.contains(&multisig.creator) { + log::warn!(target: LOG_TARGET, "Failed to unreserve deposit for known bad multisig {}, missing: {:?}", multisig.creator.to_ss58check(), missing); + + log::warn!("{:?}", frame_system::Account::::get(&multisig.creator)); + } else { + log::error!(target: LOG_TARGET, "Failed to unreserve deposit for multisig {} missing {:?}, details: {:?}", multisig.creator.to_ss58check(), missing, multisig.details); + } + } + Ok(()) + } +} diff --git a/pallets/rc-migrator/Cargo.toml b/pallets/rc-migrator/Cargo.toml index 7a83f4fb1c..d71e3109fe 100644 --- a/pallets/rc-migrator/Cargo.toml +++ b/pallets/rc-migrator/Cargo.toml @@ -12,7 +12,6 @@ codec = { workspace = true, features = ["max-encoded-len"] } scale-info = { workspace = true, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } log = { workspace = true } - frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -20,10 +19,9 @@ sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } sp-io = { workspace = true } - pallet-balances = { workspace = true } pallet-staking = { workspace = true } - +pallet-multisig = { workspace = true } polkadot-runtime-common = { workspace = true } runtime-parachains = { workspace = true } polkadot-parachain-primitives = { workspace = true } @@ -50,6 +48,7 @@ std = [ "sp-runtime/std", "sp-std/std", "xcm/std", + "pallet-multisig/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -61,6 +60,7 @@ runtime-benchmarks = [ "polkadot-runtime-common/runtime-benchmarks", "runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks" ] 
try-runtime = [ "frame-support/try-runtime", @@ -70,4 +70,5 @@ try-runtime = [ "polkadot-runtime-common/try-runtime", "runtime-parachains/try-runtime", "sp-runtime/try-runtime", + "pallet-multisig/try-runtime" ] diff --git a/pallets/rc-migrator/src/accounts.rs b/pallets/rc-migrator/src/accounts.rs index 0046196ab4..f9ed5a6cd9 100644 --- a/pallets/rc-migrator/src/accounts.rs +++ b/pallets/rc-migrator/src/accounts.rs @@ -159,23 +159,23 @@ impl Pallet { /// Get the first account that the migration should begin with. /// /// Returns `None` when there are no accounts present. - pub fn first_account(_weight: &mut WeightMeter) -> Result, ()> { + pub fn first_account(_weight: &mut WeightMeter) -> Result, Error> { (); - Ok(SystemAccount::::iter().next().map(|(who, _)| who)) + Ok(SystemAccount::::iter_keys().next()) } // TODO: Currently, we use `debug_assert!` for basic test checks against a production snapshot. /// Migrate accounts from RC to AH. /// /// Parameters: - /// - `maybe_last_key` - the last migrated account from RC to AH if any + /// - `last_key` - the last migrated account from RC to AH if any /// - `weight_counter` - the weight meter /// /// Result: /// - None - no accounts left to be migrated to AH. /// - Some(maybe_last_key) - the last migrated account from RC to AH if pub fn migrate_accounts( - mut maybe_last_key: Option, + last_key: T::AccountId, weight_counter: &mut WeightMeter, ) -> Result, Error> { // we should not send more than AH can handle within the block. @@ -189,21 +189,14 @@ impl Pallet { return Err(Error::OutOfWeight); } - let mut iter = if let Some(ref last_key) = maybe_last_key { - SystemAccount::::iter_from_key(last_key) - } else { - SystemAccount::::iter() - }; - - loop { + let mut iter = SystemAccount::::iter_from_key(last_key); + let maybe_last_key = loop { let Some((who, account_info)) = iter.next() else { - maybe_last_key = None; - break; + break None; }; - maybe_last_key = Some(who.clone()); match Self::migrate_account( - who, + who.clone(), account_info.clone(), weight_counter, &mut ah_weight_counter, @@ -212,7 +205,7 @@ impl Pallet { Ok(None) => continue, Ok(Some(ah_account)) => package.push(ah_account), // Not enough weight, lets try again in the next block since we made some progress. - Err(Error::OutOfWeight) if package.len() > 0 => break, + Err(Error::OutOfWeight) if package.len() > 0 => break Some(who.clone()), // Not enough weight and was unable to make progress, bad. Err(Error::OutOfWeight) if package.len() == 0 => { defensive!("Not enough weight to migrate a single account"); @@ -224,7 +217,7 @@ impl Pallet { return Err(e); }, } - } + }; let call = types::AssetHubPalletConfig::::AhmController( types::AhMigratorCall::::ReceiveAccounts { accounts: package }, diff --git a/pallets/rc-migrator/src/lib.rs b/pallets/rc-migrator/src/lib.rs index b2dd475778..9f79c58eeb 100644 --- a/pallets/rc-migrator/src/lib.rs +++ b/pallets/rc-migrator/src/lib.rs @@ -32,6 +32,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod accounts; +pub mod multisig; pub mod types; mod weights; pub use pallet::*; @@ -60,6 +61,9 @@ use types::AhWeightInfo; use weights::WeightInfo; use xcm::prelude::*; +use multisig::MultisigMigrator; +use types::PalletMigration; + /// The log target of this pallet. pub const LOG_TARGET: &str = "runtime::rc-migrator"; @@ -69,13 +73,27 @@ pub enum MigrationStage { #[default] Pending, /// Initializing the account migration process. - InitAccountMigration, + AccountsMigrationInit, // TODO: Initializing? /// Migrating account balances. 
- MigratingAccounts { + AccountsMigrationOngoing { // Last migrated account - last_key: Option, + last_key: AccountId, + }, + /// Accounts migration is done. Ready to go to the next one. + /// + /// Note that this stage does not have any logic attached to itself. It just exists to make it + /// easier to swap out what stage should run next for testing. + AccountsMigrationDone, + MultisigMigrationInit, + MultiSigMigrationOngoing { + /// Last migrated key of the `Multisigs` double map. + /// + /// The MEL bound must be able to hold an `AccountId32` and a 32 byte hash. TODO 1024 is + /// probably overkill, but we can optimize later. + last_key: BoundedVec>, }, + MultisigMigrationDone, /// Some next stage TODO, } @@ -98,6 +116,7 @@ pub mod pallet { + pallet_balances::Config + hrmp::Config + paras_registrar::Config + + pallet_multisig::Config { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -176,14 +195,6 @@ pub mod pallet { } } - impl Pallet { - fn transition(new_stage: MigrationStage) { - let old = RcMigrationStage::::get(); - RcMigrationStage::::put(&new_stage); - log::info!(target: LOG_TARGET, "[Block {:?}] Stage transition: {:?} -> {:?}", frame_system::Pallet::::block_number(), old, new_stage); - } - } - #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_: BlockNumberFor) -> Weight { @@ -191,58 +202,125 @@ pub mod pallet { let stage = RcMigrationStage::::get(); weight_counter.consume(T::DbWeight::get().reads(1)); - if stage == MigrationStage::Pending { - // TODO: not complete - - let _ = Self::obtain_rc_accounts(); - Self::transition(MigrationStage::InitAccountMigration); - - return weight_counter.consumed(); - } - - if stage == MigrationStage::InitAccountMigration { - let first_acc = match Self::first_account(&mut weight_counter).defensive() { - Err(e) => { - log::error!(target: LOG_TARGET, "Error while obtaining first account: {:?}. Retrying with higher weight.", e); - Self::first_account(&mut WeightMeter::new()).unwrap_or_default() - }, - Ok(acc) => acc, - }; - - Self::transition(MigrationStage::MigratingAccounts { last_key: first_acc }); - return weight_counter.consumed(); - } - - if let MigrationStage::MigratingAccounts { last_key } = stage { - let res = with_transaction_opaque_err::, Error, _>(|| { - TransactionOutcome::Commit(Self::migrate_accounts( - last_key, - &mut weight_counter, - )) - }) - .expect("Always returning Ok; qed"); - - match res { - Ok(None) => { - // accounts migration is completed - // TODO publish event - Self::transition(MigrationStage::TODO); - }, - Ok(Some(last_key)) => { - // accounts migration continues with the next block - // TODO publish event - Self::transition(MigrationStage::MigratingAccounts { - last_key: Some(last_key), + match stage { + MigrationStage::Pending => { + // TODO: not complete + + let _ = Self::obtain_rc_accounts(); + Self::transition(MigrationStage::AccountsMigrationInit); + //Self::transition(MigrationStage::MultisigMigrationInit); + }, + MigrationStage::AccountsMigrationInit => { + let first_acc = match Self::first_account(&mut weight_counter).defensive() { + Err(e) => { + log::error!(target: LOG_TARGET, "Error while obtaining first account: {:?}. 
Retrying with higher weight.", e); + Self::first_account(&mut WeightMeter::new()).unwrap_or_default() + }, + Ok(acc) => acc, + }; + + if let Some(first_acc) = first_acc { + Self::transition(MigrationStage::AccountsMigrationOngoing { + last_key: first_acc, }); - }, - _ => { - // TODO handle error - }, - } - return weight_counter.consumed(); + } else { + // No keys to migrate at all. + Self::transition(MigrationStage::AccountsMigrationDone); + } + }, + + MigrationStage::AccountsMigrationOngoing { last_key } => { + let res = + with_transaction_opaque_err::, Error, _>(|| { + TransactionOutcome::Commit(Self::migrate_accounts( + last_key, + &mut weight_counter, + )) + }) + .expect("Always returning Ok; qed"); + + match res { + Ok(None) => { + // accounts migration is completed + // TODO publish event + Self::transition(MigrationStage::AccountsMigrationDone); + }, + Ok(Some(last_key)) => { + // accounts migration continues with the next block + // TODO publish event + Self::transition(MigrationStage::AccountsMigrationOngoing { last_key }); + }, + _ => { + // TODO handle error + }, + } + }, + MigrationStage::AccountsMigrationDone => { + // Note: swap this out for faster testing + Self::transition(MigrationStage::MultisigMigrationInit); + }, + MigrationStage::MultisigMigrationInit => { + let first_key = match MultisigMigrator::::first_key(&mut weight_counter) + .defensive() + { + Err(e) => { + log::error!(target: LOG_TARGET, "Error while obtaining first multisig: {:?}. Retrying with higher weight.", e); + MultisigMigrator::::first_key(&mut WeightMeter::new()) + .unwrap_or_default() + }, + Ok(key) => key, + }; + + if let Some(first_key) = first_key { + Self::transition(MigrationStage::MultiSigMigrationOngoing { + last_key: first_key, + }); + } else { + // No keys to migrate at all. + Self::transition(MigrationStage::MultisigMigrationDone); + } + }, + MigrationStage::MultiSigMigrationOngoing { last_key } => { + let res = with_transaction_opaque_err::, Error, _>(|| { + TransactionOutcome::Commit(MultisigMigrator::::migrate_many( + last_key, + &mut weight_counter, + )) + }) + .expect("Always returning Ok; qed"); + + match res { + Ok(None) => { + // multisig migration is completed + // TODO publish event + Self::transition(MigrationStage::MultisigMigrationDone); + }, + Ok(Some(last_key)) => { + // multisig migration continues with the next block + // TODO publish event + Self::transition(MigrationStage::MultiSigMigrationOngoing { last_key }); + }, + _ => { + // TODO handle error + }, + } + }, + MigrationStage::MultisigMigrationDone => {}, + MigrationStage::TODO => { + todo!() + }, }; weight_counter.consumed() } } + + impl Pallet { + /// Execute a stage transition and log it. + fn transition(new_stage: MigrationStage) { + let old = RcMigrationStage::::get(); + RcMigrationStage::::put(&new_stage); + log::info!(target: LOG_TARGET, "[Block {:?}] Stage transition: {:?} -> {:?}", frame_system::Pallet::::block_number(), old, new_stage); + } + } } diff --git a/pallets/rc-migrator/src/multisig.rs b/pallets/rc-migrator/src/multisig.rs new file mode 100644 index 0000000000..ec7d9478d4 --- /dev/null +++ b/pallets/rc-migrator/src/multisig.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::traits::Currency; +use pallet_multisig::Multisigs; + +extern crate alloc; +use crate::{types::*, *}; +use alloc::vec::Vec; + +mod aliases { + use super::*; + use frame_system::pallet_prelude::BlockNumberFor; + use pallet_multisig::Timepoint; + + /// Copied from https://github.com/paritytech/polkadot-sdk/blob/7c5224cb01710d0c14c87bf3463cc79e49b3e7b5/substrate/frame/multisig/src/lib.rs#L96-L111 + #[derive( + Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen, + )] + #[scale_info(skip_type_params(MaxApprovals))] + pub struct Multisig + where + MaxApprovals: Get, + { + /// The extrinsic when the multisig operation was opened. + pub when: Timepoint, + /// The amount held in reserve of the `depositor`, to be returned once the operation ends. + pub deposit: Balance, + /// The account who opened it (i.e. the first to approve it). + pub depositor: AccountId, + /// The approvals achieved so far, including the depositor. Always sorted. + pub approvals: BoundedVec, + } + + /// Copied from https://github.com/paritytech/polkadot-sdk/blob/7c5224cb01710d0c14c87bf3463cc79e49b3e7b5/substrate/frame/multisig/src/lib.rs#L77-L78 + pub type BalanceOf = <::Currency as Currency< + ::AccountId, + >>::Balance; + + /// Copied from https://github.com/paritytech/polkadot-sdk/blob/7c5224cb01710d0c14c87bf3463cc79e49b3e7b5/substrate/frame/multisig/src/lib.rs#L171-L180 + #[frame_support::storage_alias(pallet_name)] + pub type Multisigs = StorageDoubleMap< + pallet_multisig::Pallet, + Twox64Concat, + ::AccountId, + Blake2_128Concat, + [u8; 32], + Multisig< + BlockNumberFor, + BalanceOf, + ::AccountId, + ::MaxSignatories, + >, + >; + + pub type MultisigOf = Multisig< + BlockNumberFor, + BalanceOf, + AccountIdOf, + ::MaxSignatories, + >; +} + +/// A multi sig that was migrated out and is ready to be received by AH. +// NOTE I am not sure if generics here are so smart, since RC and AH *have* to put the same +// generics, otherwise it would be a bug and fail to decode. However, we can just prevent that but +// by not exposing generics... On the other hand: for Westend and Kusama it could possibly help if +// we don't hard-code all types. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct RcMultisig { + /// The creator of the multisig who placed the deposit. + pub creator: AccountId, + /// Amount of the deposit. + pub deposit: Balance, + /// Optional details field to debug. Can be `None` in prod. Contains the derived account. + pub details: Option, +} + +pub type RcMultisigOf = RcMultisig, BalanceOf>; + +type BalanceOf = + <::Currency as Currency>::Balance; + +pub struct MultisigMigrator { + _marker: sp_std::marker::PhantomData, +} + +impl PalletMigration for MultisigMigrator { + type Key = BoundedVec>; + type Error = Error; + + /// The first storage key to migrate. 
+ fn first_key(_weight: &mut WeightMeter) -> Result, Error> { + (); + let Some((k1, k2)) = aliases::Multisigs::::iter_keys().next() else { + return Ok(None); + }; + let encoded_key = aliases::Multisigs::::hashed_key_for(k1, k2); + let bounded_key = BoundedVec::try_from(encoded_key).defensive().map_err(|_| Error::TODO)?; + Ok(Some(bounded_key)) + } + + /// Migrate until the weight is exhausted. Start at the given key. + fn migrate_many( + last_key: Self::Key, + weight_counter: &mut WeightMeter, + ) -> Result, Error> { + let mut batch = Vec::new(); + let mut iter = aliases::Multisigs::::iter_from(last_key.to_vec().clone()); + + let maybe_last_key = loop { + log::debug!("Migrating multisigs from {:?}", last_key); + let kv = iter.next(); + let Some((k1, k2, multisig)) = kv else { + break None; + }; + + match Self::migrate_single(k1.clone(), multisig, weight_counter) { + Ok(ms) => batch.push(ms), // TODO continue here + // Account does not need to be migrated + // Not enough weight, lets try again in the next block since we made some progress. + Err(Error::OutOfWeight) if batch.len() > 0 => + break Some(aliases::Multisigs::::hashed_key_for(k1, k2)), + // Not enough weight and was unable to make progress, bad. + Err(Error::OutOfWeight) if batch.len() == 0 => { + defensive!("Not enough weight to migrate a single account"); + return Err(Error::OutOfWeight); + }, + Err(e) => { + defensive!("Error while migrating account"); + log::error!(target: LOG_TARGET, "Error while migrating account: {:?}", e); + return Err(e); + }, + } + + // TODO construct XCM + }; + + if batch.len() > 0 { + Self::send_batch_xcm(batch)?; + } + + let bounded_key = maybe_last_key + .map(|k| BoundedVec::try_from(k).defensive().map_err(|_| Error::TODO)) + .transpose()?; + Ok(bounded_key) + } +} + +impl MultisigMigrator { + /// WILL NOT MODIFY STORAGE IN THE ERROR CASE + fn migrate_single( + k1: AccountIdOf, + ms: aliases::MultisigOf, + weight_counter: &mut WeightMeter, + ) -> Result, Error> { + // TODO weight + if weight_counter.try_consume(Weight::from_all(1_000)).is_err() { + return Err(Error::::OutOfWeight); + } + + // TODO construct XCM + + Ok(RcMultisig { creator: ms.depositor, deposit: ms.deposit, details: Some(k1) }) + } + + fn send_batch_xcm(multisigs: Vec>) -> Result<(), Error> { + let call = types::AssetHubPalletConfig::::AhmController( + types::AhMigratorCall::::ReceiveMultisigs { multisigs }, + ); + + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Instruction::Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: Weight::from_all(1), // TODO + call: call.encode().into(), + }, + ]); + + if let Err(_err) = + send_xcm::(Location::new(0, [Junction::Parachain(1000)]), message.clone()) + { + return Err(Error::TODO); + }; + + Ok(()) + } +} diff --git a/pallets/rc-migrator/src/types.rs b/pallets/rc-migrator/src/types.rs index 9fb0891c4b..8835fc19b8 100644 --- a/pallets/rc-migrator/src/types.rs +++ b/pallets/rc-migrator/src/types.rs @@ -18,6 +18,8 @@ use super::*; +pub type AccountIdOf = ::AccountId; + /// Relay Chain Freeze Reason #[derive(Encode, Decode)] pub enum AssetHubPalletConfig { @@ -30,6 +32,8 @@ pub enum AssetHubPalletConfig { pub enum AhMigratorCall { #[codec(index = 0)] ReceiveAccounts { accounts: Vec> }, + #[codec(index = 1)] + ReceiveMultisigs { multisigs: Vec> }, } /// Copy of `ParaInfo` type from `paras_registrar` pallet. 
@@ -57,3 +61,14 @@ impl AhWeightInfo for () {
 		Weight::from_all(1)
 	}
 }
+
+pub trait PalletMigration {
+	type Key;
+	type Error;
+
+	fn first_key(weight: &mut WeightMeter) -> Result<Option<Self::Key>, Self::Error>;
+	fn migrate_many(
+		maybe_last_key: Self::Key,
+		weight_counter: &mut WeightMeter,
+	) -> Result<Option<Self::Key>, Self::Error>;
+}
diff --git a/system-parachains/asset-hubs/asset-hub-polkadot/src/migration.rs b/system-parachains/asset-hubs/asset-hub-polkadot/src/migration.rs
index 5545106824..d3ed0a6e4c 100644
--- a/system-parachains/asset-hubs/asset-hub-polkadot/src/migration.rs
+++ b/system-parachains/asset-hubs/asset-hub-polkadot/src/migration.rs
@@ -49,8 +49,7 @@ pub struct RcToAhFreezeReason;
 impl Convert for RcToAhFreezeReason {
 	fn convert(a: RcFreezeReason) {
 		match a {
-			// TODO mapping
-			_ => (),
+			_TODO => (),
 		}
 	}
 }
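
A minimal sketch of the paging contract behind the new `PalletMigration` trait in `pallets/rc-migrator/src/types.rs`: `first_key` yields the starting cursor, and `migrate_many` processes items until the per-block weight budget runs out, returning the key to resume from, or `None` once everything is migrated. The `Budget` type, the `SimpleMigrator`, and the `&self`/`&mut self` receivers below are stand-ins assumed purely for illustration; the trait in the patch uses `frame_support::weights::WeightMeter` and associated functions over pallet storage.

// Illustrative stand-in for `frame_support::weights::WeightMeter`: a per-block budget.
struct Budget {
    left: u32,
}

impl Budget {
    fn try_consume(&mut self, amount: u32) -> Result<(), ()> {
        if self.left >= amount {
            self.left -= amount;
            Ok(())
        } else {
            Err(())
        }
    }
}

/// Simplified mirror of the `PalletMigration` shape: `first_key` returns the starting
/// cursor (or `None` if there is nothing to migrate), `migrate_many` migrates items
/// until the budget is exhausted and returns the cursor to resume from, or `None`
/// once it is done.
trait PalletMigration {
    type Key;
    type Error;

    fn first_key(&self, budget: &mut Budget) -> Result<Option<Self::Key>, Self::Error>;
    fn migrate_many(
        &mut self,
        last_key: Self::Key,
        budget: &mut Budget,
    ) -> Result<Option<Self::Key>, Self::Error>;
}

/// Toy migrator over a sorted list of u32 "storage keys" (hypothetical, for the sketch).
struct SimpleMigrator {
    keys: Vec<u32>,
    migrated: Vec<u32>,
}

impl PalletMigration for SimpleMigrator {
    type Key = u32;
    type Error = ();

    fn first_key(&self, _budget: &mut Budget) -> Result<Option<u32>, ()> {
        Ok(self.keys.first().copied())
    }

    fn migrate_many(&mut self, last_key: u32, budget: &mut Budget) -> Result<Option<u32>, ()> {
        // Resume at the cursor and stop as soon as the per-block budget is used up.
        let start = self.keys.iter().position(|&k| k >= last_key).unwrap_or(self.keys.len());
        for &key in self.keys[start..].iter() {
            if budget.try_consume(1).is_err() {
                // Out of weight: hand back the key to resume from in the next block.
                return Ok(Some(key));
            }
            self.migrated.push(key);
        }
        Ok(None) // Everything migrated.
    }
}

fn main() {
    let mut migrator = SimpleMigrator { keys: (0..10).collect(), migrated: Vec::new() };
    // One `migrate_many` call per block, analogous to how `on_initialize` drives the
    // `AccountsMigrationOngoing` and `MultiSigMigrationOngoing` stages.
    let mut cursor = migrator.first_key(&mut Budget { left: 4 }).unwrap();
    while let Some(last_key) = cursor {
        cursor = migrator.migrate_many(last_key, &mut Budget { left: 4 }).unwrap();
    }
    assert_eq!(migrator.migrated, (0u32..10).collect::<Vec<_>>());
}

Driving one bounded `migrate_many` call per block in this way lets a migration span as many blocks as the data requires without ever exceeding the per-block weight limit.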