Commit 1e50ecb

Fixed conflicts

ayushmishra2005 committed Jul 20, 2024
2 parents 3a56f18 + 706e354 commit 1e50ecb
Showing 5 changed files with 89 additions and 60 deletions.
88 changes: 69 additions & 19 deletions pallets/ddc-customers/src/migration.rs
@@ -67,7 +67,7 @@ pub mod v1 {
}

// Migrate to removable buckets
pub fn migrate_from_v0_to_v2<T: Config>() -> Weight {
pub fn migrate_to_v1<T: Config>() -> Weight {
let on_chain_version = Pallet::<T>::on_chain_storage_version();
if on_chain_version == 0 {
let count = v0::BucketsCount::<T>::get();
@@ -76,23 +76,22 @@ pub fn migrate_from_v0_to_v2<T: Config>() -> Weight {
" >>> Updating DDC Customers storage. Migrating {} buckets...", count
);

Buckets::<T>::translate::<v0::Bucket<T::AccountId>, _>(
v1::Buckets::<T>::translate::<v0::Bucket<T::AccountId>, _>(
|bucket_id: BucketId, bucket: v0::Bucket<T::AccountId>| {
info!(target: LOG_TARGET, " Migrating bucket for bucket ID {:?}...", bucket_id);

Some(Bucket {
Some(v1::Bucket {
bucket_id: bucket.bucket_id,
owner_id: bucket.owner_id,
cluster_id: bucket.cluster_id,
is_public: bucket.is_public,
is_removed: false,
total_customers_usage: None,
})
},
);

// Update storage version.
StorageVersion::new(2).put::<Pallet<T>>();
StorageVersion::new(1).put::<Pallet<T>>();
let count = v0::BucketsCount::<T>::get();
info!(
target: LOG_TARGET,
@@ -106,14 +105,65 @@ pub fn migrate_from_v0_to_v2<T: Config>() -> Weight {
}
}

// Migrate to removable buckets
pub fn migrate_from_v1_to_v2<T: Config>() -> Weight {
pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
fn on_runtime_upgrade() -> Weight {
migrate_to_v1::<T>()
}

#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, DispatchError> {
let prev_bucket_id = v0::BucketsCount::<T>::get();
let prev_count = v0::Buckets::<T>::iter().count();

Ok((prev_bucket_id, prev_count as u64).encode())
}

#[cfg(feature = "try-runtime")]
fn post_upgrade(prev_state: Vec<u8>) -> Result<(), DispatchError> {
let (prev_bucket_id, prev_count): (u64, u64) =
Decode::decode(&mut &prev_state[..]).expect("pre_upgrade provides a valid state; qed");

let post_bucket_id = Pallet::<T>::buckets_count();
ensure!(
prev_bucket_id == post_bucket_id,
"the last bucket ID before and after the migration should be the same"
);

let post_count = Buckets::<T>::iter().count() as u64;
ensure!(
prev_count == post_count,
"the bucket count before and after the migration should be the same"
);

let current_version = Pallet::<T>::current_storage_version();
let on_chain_version = Pallet::<T>::on_chain_storage_version();

frame_support::ensure!(current_version == 1, "must_upgrade");
ensure!(
current_version == on_chain_version,
"after migration, the current_version and on_chain_version should be the same"
);

Buckets::<T>::iter().try_for_each(|(_id, bucket)| -> Result<(), &'static str> {
ensure!(
!bucket.is_removed,
"At this point all the bucket should have is_removed set to false"
);
Ok(())
})?;
Ok(())
}
}

// New migration to add total_customers_usage field
pub fn migrate_to_v2<T: Config>() -> Weight {
let on_chain_version = Pallet::<T>::on_chain_storage_version();
if on_chain_version == 1 {
let count = v1::BucketsCount::<T>::get();
info!(
target: LOG_TARGET,
" >>> Updating DDC Customers storage. Migrating {} buckets...", count
" >>> Updating DDC Customers storage to v2. Migrating {} buckets...", count
);

Buckets::<T>::translate::<v1::Bucket<T::AccountId>, _>(
@@ -125,7 +175,7 @@ pub fn migrate_from_v1_to_v2<T: Config>() -> Weight {
owner_id: bucket.owner_id,
cluster_id: bucket.cluster_id,
is_public: bucket.is_public,
is_removed: false,
is_removed: bucket.is_removed,
total_customers_usage: None,
})
},
@@ -136,26 +186,26 @@ pub fn migrate_from_v1_to_v2<T: Config>() -> Weight {
let count = v1::BucketsCount::<T>::get();
info!(
target: LOG_TARGET,
" <<< DDC Customers storage updated! Migrated {} buckets ✅", count
" <<< DDC Customers storage updated to v2! Migrated {} buckets ✅", count
);

T::DbWeight::get().reads_writes(count + 2, count + 1)
} else {
info!(target: LOG_TARGET, " >>> Unused migration!");
info!(target: LOG_TARGET, " >>> Unused migration to v2!");
T::DbWeight::get().reads(1)
}
}

pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
pub struct MigrateToV2<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV2<T> {
fn on_runtime_upgrade() -> Weight {
migrate_from_v1_to_v2::<T>()
migrate_to_v2::<T>()
}

#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, DispatchError> {
let prev_bucket_id = v0::BucketsCount::<T>::get();
let prev_count = v0::Buckets::<T>::iter().count();
let prev_bucket_id = v1::BucketsCount::<T>::get();
let prev_count = v1::Buckets::<T>::iter().count();

Ok((prev_bucket_id, prev_count as u64).encode())
}
Expand All @@ -180,16 +230,16 @@ impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
let current_version = Pallet::<T>::current_storage_version();
let on_chain_version = Pallet::<T>::on_chain_storage_version();

frame_support::ensure!(current_version == 1, "must_upgrade");
frame_support::ensure!(current_version == 2, "must_upgrade");
ensure!(
current_version == on_chain_version,
"after migration, the current_version and on_chain_version should be the same"
);

Buckets::<T>::iter().try_for_each(|(_id, bucket)| -> Result<(), &'static str> {
ensure!(
!bucket.is_removed,
"At this point all the bucket should have is_removed set to false"
bucket.total_customers_usage.is_none(),
"At this point all the bucket should have total_customers_usage set to None"
);
Ok(())
})?;
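For orientation, the two translate calls above convert between bucket layouts roughly like the ones sketched below. This is a minimal, hypothetical reconstruction: the real v0/v1 definitions live elsewhere in migration.rs, and the concrete BucketId, ClusterId, and usage types come from ddc-primitives, so the placeholder types used here are assumptions rather than the pallet's actual definitions.

// Hypothetical sketch of the bucket layouts the migrations translate between.
// Concrete types (bucket ID, cluster ID, usage) are placeholders, not the real ones.
use codec::{Decode, Encode};

#[derive(Encode, Decode)]
pub struct BucketV0<AccountId> {
    pub bucket_id: u64,       // BucketId in the real pallet
    pub owner_id: AccountId,
    pub cluster_id: [u8; 20], // ClusterId in the real pallet
    pub is_public: bool,
}

// v1 adds `is_removed`, which migrate_to_v1 initialises to `false`.
#[derive(Encode, Decode)]
pub struct BucketV1<AccountId> {
    pub bucket_id: u64,
    pub owner_id: AccountId,
    pub cluster_id: [u8; 20],
    pub is_public: bool,
    pub is_removed: bool,
}

// v2 keeps `is_removed` and adds `total_customers_usage`,
// which migrate_to_v2 initialises to `None`.
#[derive(Encode, Decode)]
pub struct BucketV2<AccountId, Usage> {
    pub bucket_id: u64,
    pub owner_id: AccountId,
    pub cluster_id: [u8; 20],
    pub is_public: bool,
    pub is_removed: bool,
    pub total_customers_usage: Option<Usage>,
}

Each step only appends a field with a safe default, which is the invariant the try-runtime hooks check: bucket counts and the last bucket ID must be identical before and after the upgrade.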
10 changes: 3 additions & 7 deletions pallets/ddc-verification/src/lib.rs
@@ -42,9 +42,9 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*};

pub mod weights;
use itertools::Itertools;
use sp_staking::StakingInterface;

use crate::weights::WeightInfo;
use sp_staking::StakingInterface;

#[cfg(test)]
pub(crate) mod mock;
@@ -53,7 +53,7 @@ mod tests;

#[frame_support::pallet]
pub mod pallet {
use ddc_primitives::{traits::StakingVisitor, BucketId, MergeActivityHash, KEY_TYPE};
use ddc_primitives::{BucketId, MergeActivityHash, KEY_TYPE};
use frame_support::PalletId;
use sp_runtime::SaturatedConversion;

@@ -2864,11 +2864,7 @@ pub mod pallet {
pub fn set_current_validator(origin: OriginFor<T>) -> DispatchResult {
let caller: T::AccountId = ensure_signed(origin)?;


ensure!(
<ValidatorSet<T>>::get().contains(&caller),
Error::<T>::NotValidatorStash
);
ensure!(<ValidatorSet<T>>::get().contains(&caller), Error::<T>::NotValidatorStash);

if Self::is_ocw_validator(caller.clone()) {
log::info!("🏄‍ is_ocw_validator is a validator {:?}", caller.clone());
23 changes: 2 additions & 21 deletions pallets/ddc-verification/src/mock.rs
@@ -1,6 +1,6 @@
use ddc_primitives::{
crypto, sr25519,
traits::{ClusterManager, ClusterQuery, StakingVisitor, StakingVisitorError},
traits::{ClusterManager, ClusterQuery},
BucketId, ClusterNodeKind, ClusterNodeState, ClusterNodeStatus, ClusterNodesStats,
ClusterStatus, PayoutError, PayoutState, StorageNodePubKey, MAX_PAYOUT_BATCH_COUNT,
MAX_PAYOUT_BATCH_SIZE,
@@ -229,26 +229,7 @@ impl crate::Config for Test {
const MAX_PAYOUT_BATCH_SIZE: u16 = MAX_PAYOUT_BATCH_SIZE;
const MAX_PAYOUT_BATCH_COUNT: u16 = MAX_PAYOUT_BATCH_COUNT;
type ActivityHash = H256;
type StakingVisitor = TestStakingVisitor;
}

pub struct TestStakingVisitor;
impl<T: Config> StakingVisitor<T> for TestStakingVisitor {
fn has_activated_stake(
_node_pub_key: &NodePubKey,
_cluster_id: &ClusterId,
) -> Result<bool, StakingVisitorError> {
Ok(true)
}
fn has_stake(_node_pub_key: &NodePubKey) -> bool {
true
}
fn has_chilling_attempt(_node_pub_key: &NodePubKey) -> Result<bool, StakingVisitorError> {
Ok(false)
}
fn stash_by_ctrl(controller: &T::AccountId) -> Result<T::AccountId, StakingVisitorError> {
Ok(controller.clone())
}
type StakingVisitor = Staking;
}

// Build genesis storage according to the mock runtime.
17 changes: 10 additions & 7 deletions runtime/cere-dev/src/lib.rs
@@ -521,8 +521,8 @@ fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys {
im_online: old.im_online,
authority_discovery: old.authority_discovery,
ddc_verification: {

let mut id: ddc_primitives::sr25519::AuthorityId = sp_core::sr25519::Public::from_raw([0u8; 32]).into();
let mut id: ddc_primitives::sr25519::AuthorityId =
sp_core::sr25519::Public::from_raw([0u8; 32]).into();
let id_raw: &mut [u8] = id.as_mut();
id_raw[0..32].copy_from_slice(v.as_ref());
id_raw[0..4].copy_from_slice(b"cer!");
@@ -1401,7 +1401,13 @@ pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>;
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, RuntimeCall, SignedExtra>;

/// Runtime migrations
type Migrations = (pallet_ddc_customers::migration::MigrateToV1<Runtime>, migrations::Unreleased);
type Migrations = (
pallet_ddc_clusters::migrations::v2::MigrateToV2<Runtime>,
pallet_ddc_staking::migrations::v1::MigrateToV1<Runtime>,
pallet_ddc_customers::migration::MigrateToV2<Runtime>,
pallet_ddc_customers::migration::MigrateToV1<Runtime>,
migrations::Unreleased,
);

/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
@@ -1413,7 +1419,6 @@ pub type Executive = frame_executive::Executive<
Migrations,
>;


pub mod migrations {
use super::*;

@@ -1427,9 +1432,7 @@ pub mod migrations {
}

/// Unreleased migrations. Add new ones here:
pub type Unreleased = (
UpgradeSessionKeys,
);
pub type Unreleased = (UpgradeSessionKeys,);
}

type EventRecord = frame_system::EventRecord<
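As a reminder of how these migration tuples behave: Executive treats the whole tuple as a single OnRuntimeUpgrade implementation and runs each element in declaration order, summing the returned weights, while each step guards itself on the pallet's on-chain StorageVersion. Below is a minimal, self-contained illustration of that pattern with hypothetical StepA/StepB steps (not the DDC pallets above).

use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

pub struct StepA;
impl OnRuntimeUpgrade for StepA {
    fn on_runtime_upgrade() -> Weight {
        // A real step would check the pallet's on-chain StorageVersion here
        // and return early (as a no-op) when the version does not match.
        Weight::zero()
    }
}

pub struct StepB;
impl OnRuntimeUpgrade for StepB {
    fn on_runtime_upgrade() -> Weight {
        Weight::zero()
    }
}

// Tuples of `OnRuntimeUpgrade` implementations are themselves
// `OnRuntimeUpgrade`; elements execute in declaration order.
type ExampleMigrations = (StepA, StepB);

fn _run_all() -> Weight {
    <ExampleMigrations as OnRuntimeUpgrade>::on_runtime_upgrade()
}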
11 changes: 5 additions & 6 deletions runtime/cere/src/lib.rs
@@ -515,8 +515,8 @@ fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys {
im_online: old.im_online,
authority_discovery: old.authority_discovery,
ddc_verification: {

let mut id: ddc_primitives::sr25519::AuthorityId = sp_core::sr25519::Public::from_raw([0u8; 32]).into();
let mut id: ddc_primitives::sr25519::AuthorityId =
sp_core::sr25519::Public::from_raw([0u8; 32]).into();
let id_raw: &mut [u8] = id.as_mut();
id_raw[0..32].copy_from_slice(v.as_ref());
id_raw[0..4].copy_from_slice(b"cer!");
@@ -1412,8 +1412,9 @@ pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, RuntimeCall, Si
type Migrations = (
pallet_ddc_clusters::migrations::v2::MigrateToV2<Runtime>,
pallet_ddc_staking::migrations::v1::MigrateToV1<Runtime>,
pallet_ddc_customers::migration::MigrateToV2<Runtime>,
pallet_ddc_customers::migration::MigrateToV1<Runtime>,
migrations::Unreleased
migrations::Unreleased,
);

pub mod migrations {
@@ -1429,9 +1430,7 @@ pub mod migrations {
}

/// Unreleased migrations. Add new ones here:
pub type Unreleased = (
UpgradeSessionKeys,
);
pub type Unreleased = (UpgradeSessionKeys,);
}
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
