-
Notifications
You must be signed in to change notification settings - Fork 2
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Check current signers still validators #1097
Changes from 24 commits
454c9ad
940b650
a27a9ec
0fea1bf
7f2346d
a54dd56
ae41b50
ee8c1fa
2d91051
f5daa0f
ed32eba
4482fd6
b1d93ac
84cf720
97f54a9
0e8c7b4
4ec4f36
3a58960
1864b4d
4f7167b
3328151
118fd90
516fa7b
3d9bd90
4154e88
75e73f5
3880ff1
afe674e
1154f1e
72c89e5
b5d2ebd
7312376
537f1b4
f44a7e3
0c1dc4b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -55,8 +55,8 @@ pub struct OcwMessageDkg { | |
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] | ||
#[derive(Clone, Encode, Decode, Debug, Eq, PartialEq, TypeInfo)] | ||
pub struct OcwMessageReshare { | ||
// Stash address of new signer | ||
pub new_signer: Vec<u8>, | ||
// Stash addresses of new signers | ||
pub new_signers: Vec<Vec<u8>>, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. A few lines above, in |
||
pub block_number: BlockNumber, | ||
} | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -106,7 +106,7 @@ pub async fn new_reshare( | |
.map_err(|e| ValidatorErr::UserError(e.to_string()))?; | ||
|
||
let old_holder: Option<OldHolder<KeyParams, PartyId>> = | ||
if data.new_signer == my_stash_address.encode() { | ||
if data.new_signers.contains(&my_stash_address.encode()) { | ||
None | ||
} else { | ||
let kvdb_result = app_state.kv_store.kv().get(&hex::encode(NETWORK_PARENT_KEY)).await?; | ||
|
@@ -116,14 +116,15 @@ pub async fn new_reshare( | |
Some(OldHolder { key_share: key_share.0 }) | ||
}; | ||
|
||
let party_ids: BTreeSet<PartyId> = | ||
// new_holders -> From chain next_signers (old_holders (currently forced to be t) + new_holders) | ||
// also acts as verifiers as is everyone in the party | ||
let new_holders: BTreeSet<PartyId> = | ||
validators_info.iter().cloned().map(|x| PartyId::new(x.tss_account)).collect(); | ||
|
||
let pruned_old_holders = | ||
prune_old_holders(&api, &rpc, data.new_signer, validators_info.clone()).await?; | ||
|
||
// old holders -> next_signers - new_signers (will be at least t) | ||
let old_holders = | ||
&prune_old_holders(&api, &rpc, data.new_signers, validators_info.clone()).await?; | ||
Comment on lines
+123
to
+125
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I can't find where the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yeah, this is a little hard to follow — the flow comes from the chain itself, here: https://github.com/entropyxyz/entropy-core/pull/1097/files#diff-9be73ee9078236e99d5b726b1a2d9acd38446cedbe770d2f23d39ef51b1763ceR720 |
||
let old_holders: BTreeSet<PartyId> = | ||
pruned_old_holders.into_iter().map(|x| PartyId::new(x.tss_account)).collect(); | ||
old_holders.iter().map(|x| PartyId::new(x.tss_account.clone())).collect(); | ||
|
||
let new_holder = NewHolder { | ||
verifying_key: decoded_verifying_key, | ||
|
@@ -139,7 +140,7 @@ pub async fn new_reshare( | |
let inputs = KeyResharingInputs { | ||
old_holder, | ||
new_holder: Some(new_holder), | ||
new_holders: party_ids.clone(), | ||
new_holders: new_holders.clone(), | ||
new_threshold: threshold as usize, | ||
}; | ||
|
||
|
@@ -157,7 +158,6 @@ pub async fn new_reshare( | |
converted_validator_info.push(validator_info.clone()); | ||
tss_accounts.push(validator_info.tss_account.clone()); | ||
} | ||
|
||
let channels = get_channels( | ||
&app_state.listener_state, | ||
converted_validator_info, | ||
|
@@ -169,7 +169,8 @@ pub async fn new_reshare( | |
.await?; | ||
|
||
let (new_key_share, aux_info) = | ||
execute_reshare(session_id.clone(), channels, signer.signer(), inputs, None).await?; | ||
execute_reshare(session_id.clone(), channels, signer.signer(), inputs, &new_holders, None) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Maybe I've misunderstood something, but it looks like There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. explained lower down |
||
.await?; | ||
|
||
let serialized_key_share = key_serialize(&(new_key_share, aux_info)) | ||
.map_err(|_| ProtocolErr::KvSerialize("Kv Serialize Error".to_string()))?; | ||
|
@@ -273,8 +274,8 @@ pub async fn validate_new_reshare( | |
.await? | ||
.ok_or_else(|| ValidatorErr::ChainFetch("Not Currently in a reshare"))?; | ||
|
||
if reshare_data.new_signer != chain_data.new_signer | ||
|| chain_data.block_number != reshare_data.block_number | ||
if chain_data.new_signers != reshare_data.new_signers | ||
|| chain_data.block_number != reshare_data.block_number.saturating_sub(1) | ||
JesseAbram marked this conversation as resolved.
Show resolved
Hide resolved
|
||
{ | ||
return Err(ValidatorErr::InvalidData); | ||
} | ||
|
@@ -365,20 +366,24 @@ pub fn check_forbidden_key(key: &str) -> Result<(), ValidatorErr> { | |
pub async fn prune_old_holders( | ||
api: &OnlineClient<EntropyConfig>, | ||
rpc: &LegacyRpcMethods<EntropyConfig>, | ||
new_signer: Vec<u8>, | ||
new_signers: Vec<Vec<u8>>, | ||
validators_info: Vec<ValidatorInfo>, | ||
) -> Result<Vec<ValidatorInfo>, ValidatorErr> { | ||
Ok(if !new_signer.is_empty() { | ||
let address_slice: &[u8; 32] = &new_signer.clone().try_into().unwrap(); | ||
let new_signer_address = AccountId32(*address_slice); | ||
let new_signer_info = &get_validators_info(api, rpc, vec![new_signer_address]) | ||
.await | ||
.map_err(|e| ValidatorErr::UserError(e.to_string()))?[0]; | ||
validators_info | ||
.iter() | ||
.filter(|x| x.tss_account != new_signer_info.tss_account) | ||
.cloned() | ||
.collect() | ||
Ok(if !new_signers.is_empty() { | ||
let mut filtered_validators_info = vec![]; | ||
for new_signer in new_signers { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This works fine - but since we are anyway about to convert this to a BTreeSet, we can use |
||
let address_slice: &[u8; 32] = &new_signer.clone().try_into().unwrap(); | ||
let new_signer_address = AccountId32(*address_slice); | ||
let new_signer_info = &get_validators_info(api, rpc, vec![new_signer_address]) | ||
.await | ||
.map_err(|e| ValidatorErr::UserError(e.to_string()))?[0]; | ||
filtered_validators_info = validators_info | ||
.iter() | ||
.filter(|x| x.tss_account != new_signer_info.tss_account) | ||
.cloned() | ||
.collect::<Vec<_>>(); | ||
} | ||
filtered_validators_info | ||
} else { | ||
validators_info.clone() | ||
}) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -67,7 +67,7 @@ use synedrion::k256::ecdsa::VerifyingKey; | |
|
||
#[tokio::test] | ||
#[serial] | ||
async fn test_reshare() { | ||
async fn test_reshare_foo() { | ||
JesseAbram marked this conversation as resolved.
Show resolved
Hide resolved
|
||
initialize_test_logger().await; | ||
clean_tests(); | ||
|
||
|
@@ -76,7 +76,7 @@ async fn test_reshare() { | |
let (_validator_ips, _validator_ids) = | ||
spawn_testing_validators(ChainSpecType::Integration).await; | ||
|
||
let validator_ports = vec![3001, 3002, 3003, 3004]; | ||
let validator_ports = vec![3002, 3003, 3004]; | ||
let api = get_api(&cxt.ws_url).await.unwrap(); | ||
let rpc = get_rpc(&cxt.ws_url).await.unwrap(); | ||
|
||
|
@@ -113,13 +113,15 @@ async fn test_reshare() { | |
let new_signer = all_validators.iter().find(|v| !signer_stash_accounts.contains(v)).unwrap(); | ||
|
||
let block_number = TEST_RESHARE_BLOCK_NUMBER; | ||
let onchain_reshare_request = | ||
OcwMessageReshare { new_signer: new_signer.0.to_vec(), block_number }; | ||
let onchain_reshare_request = OcwMessageReshare { | ||
new_signers: vec![new_signer.0.to_vec()], | ||
block_number: block_number - 1, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why change this instead of keeping the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. because this needs to match the onchain info or it will not pass the validate check |
||
}; | ||
|
||
run_to_block(&rpc, block_number + 1).await; | ||
run_to_block(&rpc, block_number).await; | ||
// Send the OCW message to all TS servers who don't have a chain node | ||
let response_results = join_all( | ||
validator_ports[1..] | ||
validator_ports | ||
.iter() | ||
.map(|port| { | ||
client | ||
|
@@ -323,7 +325,8 @@ async fn test_reshare_validation_fail() { | |
let kv = setup_client().await; | ||
|
||
let block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number + 1; | ||
let mut ocw_message = OcwMessageReshare { new_signer: dave.public().encode(), block_number }; | ||
let mut ocw_message = | ||
OcwMessageReshare { new_signers: vec![dave.public().encode()], block_number }; | ||
|
||
let err_stale_data = | ||
validate_new_reshare(&api, &rpc, &ocw_message, &kv).await.map_err(|e| e.to_string()); | ||
|
@@ -361,7 +364,8 @@ async fn test_reshare_validation_fail_not_in_reshare() { | |
let kv = setup_client().await; | ||
|
||
let block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number + 1; | ||
let ocw_message = OcwMessageReshare { new_signer: alice.public().encode(), block_number }; | ||
let ocw_message = | ||
OcwMessageReshare { new_signers: vec![alice.public().encode()], block_number }; | ||
|
||
run_to_block(&rpc, block_number + 1).await; | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -153,19 +153,16 @@ async fn do_reshare(api: &OnlineClient<EntropyConfig>, rpc: &LegacyRpcMethods<En | |
signers.push(server_info); | ||
} | ||
|
||
// Get all validators | ||
let validators_query = entropy::storage().session().validators(); | ||
let all_validators = query_chain(&api, &rpc, validators_query, None).await.unwrap().unwrap(); | ||
|
||
// Get stash account of a non-signer, to become the new signer | ||
// Since we only have 4 nodes in our test setup, this will be the same one the chain chooses | ||
let new_signer = all_validators.iter().find(|v| !signer_stash_accounts.contains(v)).unwrap(); | ||
let reshare_data_query = entropy::storage().staking_extension().reshare_data(); | ||
let reshare_data = query_chain(&api, &rpc, reshare_data_query, None).await.unwrap().unwrap(); | ||
|
||
let block_number = TEST_RESHARE_BLOCK_NUMBER; | ||
let onchain_reshare_request = | ||
OcwMessageReshare { new_signer: new_signer.0.to_vec(), block_number }; | ||
let onchain_reshare_request = OcwMessageReshare { | ||
new_signers: reshare_data.new_signers.into_iter().map(|s| s.to_vec()).collect(), | ||
block_number: block_number - 1, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Does this blow up on block 0? Maybe There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. the block number is hard coded one line above to a number way over 0 |
||
}; | ||
|
||
run_to_block(&rpc, block_number + 1).await; | ||
run_to_block(&rpc, block_number).await; | ||
// Send the OCW message to all TS servers who don't have a chain node | ||
let client = reqwest::Client::new(); | ||
let response_results = join_all( | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -29,11 +29,13 @@ use frame_support::{ | |
use frame_system::{EventRecord, RawOrigin}; | ||
use pallet_parameters::{SignersInfo, SignersSize}; | ||
use pallet_staking::{ | ||
Event as FrameStakingEvent, MaxNominationsOf, Nominations, Pallet as FrameStaking, | ||
RewardDestination, ValidatorPrefs, | ||
Event as FrameStakingEvent, MaxNominationsOf, MaxValidatorsCount, Nominations, | ||
Pallet as FrameStaking, RewardDestination, ValidatorPrefs, | ||
}; | ||
use sp_std::{vec, vec::Vec}; | ||
|
||
// type MaxValidators<T> = <<T as Config>::BenchmarkingConfig as BenchmarkingConfig>::MaxValidators; | ||
|
||
JesseAbram marked this conversation as resolved.
Show resolved
Hide resolved
|
||
const NULL_ARR: [u8; 32] = [0; 32]; | ||
const SEED: u32 = 0; | ||
|
||
|
@@ -427,13 +429,22 @@ benchmarks! { | |
new_session { | ||
let c in 1 .. MAX_SIGNERS as u32 - 1; | ||
let l in 0 .. MAX_SIGNERS as u32; | ||
let v in 0 .. MAX_SIGNERS as u32; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Shouldn't this value be higher than |
||
|
||
let caller: T::AccountId = whitelisted_caller(); | ||
let validator_ids = create_validators::<T>(100, 1); | ||
let second_signer: T::AccountId = account("second_signer", 0, SEED); | ||
let second_signer_id = | ||
<T as pallet_session::Config>::ValidatorId::try_from(second_signer.clone()) | ||
.or(Err(Error::<T>::InvalidValidatorId)) | ||
.unwrap(); | ||
// let mut signers = //create_validators::<T>((MAX_SIGNERS - 1) as u32, SEED); | ||
JesseAbram marked this conversation as resolved.
Show resolved
Hide resolved
|
||
let mut signers = vec![second_signer_id.clone(); c as usize]; | ||
|
||
// For the purpose of the bench these values don't actually matter, we just care that there's a | ||
// storage entry available | ||
SignersInfo::<T>::put(SignersSize { | ||
total_signers: MAX_SIGNERS, | ||
total_signers: 5, | ||
threshold: 3, | ||
last_session_change: 0, | ||
}); | ||
|
@@ -442,23 +453,17 @@ benchmarks! { | |
.or(Err(Error::<T>::InvalidValidatorId)) | ||
.unwrap(); | ||
|
||
let second_signer: T::AccountId = account("second_signer", 0, SEED); | ||
let second_signer_id = | ||
<T as pallet_session::Config>::ValidatorId::try_from(second_signer.clone()) | ||
.or(Err(Error::<T>::InvalidValidatorId)) | ||
.unwrap(); | ||
|
||
// full signer list leaving room for one extra validator | ||
let mut signers = vec![second_signer_id.clone(); c as usize]; | ||
|
||
Signers::<T>::put(signers.clone()); | ||
signers.push(second_signer_id.clone()); | ||
|
||
// place new signer in the signers struct in different locations to calculate random selection | ||
// re-run | ||
signers[l as usize % c as usize] = validator_id.clone(); | ||
// as well validators may be dropped before chosen | ||
signers[l as usize % c as usize] = validator_ids[l as usize % c as usize].clone(); | ||
|
||
for i in 0 .. c { | ||
signers[i as usize] = validator_ids[i as usize].clone(); | ||
} | ||
Signers::<T>::put(signers.clone()); | ||
}: { | ||
let _ = Staking::<T>::new_session_handler(&signers); | ||
let _ = Staking::<T>::new_session_handler(&validator_ids); | ||
} | ||
verify { | ||
assert!(NextSigners::<T>::get().is_some()); | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
im not sure we need this argument as it looks like whenever we call this function
verifiers
is the same as `inputs.new_holders`
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes — originally I changed it to allow for old holders to be a larger set not contained in the new holders, but I ran into issue #1114. I didn't change it back because this is still more correct IMO and adds no overhead, and it will hopefully be needed once that issue is resolved.