validate new reshare (#970)
* validate new reshare

* add negative tests

* docs

* fix tests

* Update crates/threshold-signature-server/src/helpers/launch.rs

Co-authored-by: peg <[email protected]>

* fix

* add test conditional

---------

Co-authored-by: peg <[email protected]>
JesseAbram and ameba23 authored Jul 31, 2024
1 parent f996455 commit c29e49a
Showing 7 changed files with 169 additions and 11 deletions.
3 changes: 3 additions & 0 deletions crates/shared/src/constants.rs
@@ -68,3 +68,6 @@ pub const TOTAL_SIGNERS: u8 = 3;

/// Threshold for those signers
pub const SIGNER_THRESHOLD: u8 = 2;

/// Used in testing to line up chain mock data with `reshare_test`
pub const TEST_RESHARE_BLOCK_NUMBER: u32 = 5;
15 changes: 15 additions & 0 deletions crates/threshold-signature-server/src/helpers/launch.rs
@@ -45,6 +45,8 @@ pub const DEFAULT_DAVE_MNEMONIC: &str =
pub const DEFAULT_EVE_MNEMONIC: &str =
"impact federal dish number fun crisp various wedding radio immense whisper glue";
pub const LATEST_BLOCK_NUMBER_NEW_USER: &str = "LATEST_BLOCK_NUMBER_NEW_USER";
pub const LATEST_BLOCK_NUMBER_RESHARE: &str = "LATEST_BLOCK_NUMBER_RESHARE";

pub const LATEST_BLOCK_NUMBER_PROACTIVE_REFRESH: &str = "LATEST_BLOCK_NUMBER_PROACTIVE_REFRESH";

#[cfg(any(test, feature = "test_helpers"))]
@@ -373,6 +375,19 @@ pub async fn setup_latest_block_number(kv: &KvManager) -> Result<(), KvError> {
.await
.expect("failed to update latest block number");
}
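    // Store an initial reshare block number of zero if one has not been recorded yet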
let exists_result_reshare =
kv.kv().exists(LATEST_BLOCK_NUMBER_RESHARE).await.expect("issue querying DB");
if !exists_result_reshare {
let reservation = kv
.kv()
.reserve_key(LATEST_BLOCK_NUMBER_RESHARE.to_string())
.await
.expect("Issue reserving latest block number");
kv.kv()
.put(reservation, 0u32.to_be_bytes().to_vec())
.await
.expect("failed to update latest block number");
}
Ok(())
}

3 changes: 3 additions & 0 deletions crates/threshold-signature-server/src/user/tests.rs
@@ -784,15 +784,18 @@ async fn test_jumpstart_network() {
}

    // Wait for the jump start event, then check that the key exists in the kvdb
let mut got_jumpstart_event = false;
for _ in 0..45 {
std::thread::sleep(std::time::Duration::from_millis(1000));
let block_hash = rpc.chain_get_block_hash(None).await.unwrap();
let events = EventsClient::new(api.clone()).at(block_hash.unwrap()).await.unwrap();
let jump_start_event = events.find::<entropy::registry::events::FinishedNetworkJumpStart>();
for _event in jump_start_event.flatten() {
got_jumpstart_event = true;
break;
}
}
assert!(got_jumpstart_event);

let response_key = unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), 3001).await;

52 changes: 49 additions & 3 deletions crates/threshold-signature-server/src/validator/api.rs
@@ -20,15 +20,15 @@ use crate::{
},
get_signer_and_x25519_secret,
helpers::{
launch::FORBIDDEN_KEYS,
launch::{FORBIDDEN_KEYS, LATEST_BLOCK_NUMBER_RESHARE},
substrate::{get_stash_address, get_validators_info, query_chain, submit_transaction},
},
signing_client::{protocol_transport::open_protocol_connections, ProtocolErr},
validator::errors::ValidatorErr,
AppState,
};
use axum::{body::Bytes, extract::State, http::StatusCode};
use entropy_kvdb::kv_manager::helpers::serialize as key_serialize;
use entropy_kvdb::kv_manager::{helpers::serialize as key_serialize, KvManager};
pub use entropy_protocol::{
decode_verifying_key,
errors::ProtocolExecutionErr,
@@ -65,7 +65,7 @@ pub async fn new_reshare(

let api = get_api(&app_state.configuration.endpoint).await?;
let rpc = get_rpc(&app_state.configuration.endpoint).await?;

validate_new_reshare(&api, &rpc, &data, &app_state.kv_store).await?;
let signers_query = entropy::storage().staking_extension().signers();
let signers = query_chain(&api, &rpc, signers_query, None)
.await?
@@ -219,6 +219,52 @@ pub async fn new_reshare(
Ok(StatusCode::OK)
}

/// Validates a new reshare request.
/// Checks on chain that the data is valid and that its block number matches the current block.
pub async fn validate_new_reshare(
api: &OnlineClient<EntropyConfig>,
rpc: &LegacyRpcMethods<EntropyConfig>,
chain_data: &OcwMessageReshare,
kv_manager: &KvManager,
) -> Result<(), ValidatorErr> {
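    // Reject a reshare message for a block we have already processed (replay protection)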
let last_block_number_recorded = kv_manager.kv().get(LATEST_BLOCK_NUMBER_RESHARE).await?;
if u32::from_be_bytes(
last_block_number_recorded
.try_into()
.map_err(|_| ValidatorErr::Conversion("Block number conversion"))?,
) >= chain_data.block_number
{
return Err(ValidatorErr::RepeatedData);
}

let latest_block_number = rpc
.chain_get_header(None)
.await?
.ok_or_else(|| ValidatorErr::OptionUnwrapError("Failed to get block number".to_string()))?
.number;

    // We subtract 1 as the message info comes from the previous block
if latest_block_number.saturating_sub(1) != chain_data.block_number {
return Err(ValidatorErr::StaleData);
}

let reshare_data_info_query = entropy::storage().staking_extension().reshare_data();
let reshare_data = query_chain(api, rpc, reshare_data_info_query, None)
.await?
.ok_or_else(|| ValidatorErr::ChainFetch("Not Currently in a reshare"))?;

if reshare_data.new_signer != chain_data.new_signer
|| chain_data.block_number != reshare_data.block_number
{
return Err(ValidatorErr::InvalidData);
}
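    // Record the block number of this validated message so a repeated message is rejected next time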
kv_manager.kv().delete(LATEST_BLOCK_NUMBER_RESHARE).await?;
let reservation = kv_manager.kv().reserve_key(LATEST_BLOCK_NUMBER_RESHARE.to_string()).await?;
kv_manager.kv().put(reservation, chain_data.block_number.to_be_bytes().to_vec()).await?;

Ok(())
}

/// Confirms that a validator has successfully reshared.
pub async fn confirm_key_reshare(
api: &OnlineClient<EntropyConfig>,
6 changes: 6 additions & 0 deletions crates/threshold-signature-server/src/validator/errors.rs
@@ -87,6 +87,12 @@ pub enum ValidatorErr {
KvSerialize(String),
#[error("Kv Deserialization Error: {0}")]
KvDeserialize(String),
#[error("Data is stale")]
StaleData,
#[error("Data is not verifiable")]
InvalidData,
#[error("Data is repeated")]
RepeatedData,
}

impl IntoResponse for ValidatorErr {
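As an aside (not part of this commit): the new variants reuse the file's existing thiserror derive, so their Display strings are exactly the messages the negative tests below assert against. A minimal illustrative check:

#[test]
fn reshare_error_messages_match_test_expectations() {
    use crate::validator::errors::ValidatorErr;
    // thiserror's #[error(...)] attributes define the Display output compared in the tests.
    assert_eq!(ValidatorErr::StaleData.to_string(), "Data is stale");
    assert_eq!(ValidatorErr::InvalidData.to_string(), "Data is not verifiable");
    assert_eq!(ValidatorErr::RepeatedData.to_string(), "Data is repeated");
}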
85 changes: 79 additions & 6 deletions crates/threshold-signature-server/src/validator/tests.rs
@@ -19,18 +19,27 @@ use crate::{
get_api, get_rpc, EntropyConfig,
},
helpers::{
launch::{development_mnemonic, ValidatorName, FORBIDDEN_KEYS},
launch::{
development_mnemonic, ValidatorName, FORBIDDEN_KEYS, LATEST_BLOCK_NUMBER_RESHARE,
},
substrate::submit_transaction,
tests::{initialize_test_logger, spawn_testing_validators, unsafe_get},
tests::{
initialize_test_logger, run_to_block, setup_client, spawn_testing_validators,
unsafe_get,
},
validator::get_signer_and_x25519_secret_from_mnemonic,
},
validator::errors::ValidatorErr,
validator::{api::validate_new_reshare, errors::ValidatorErr},
};
use entropy_kvdb::clean_tests;
use entropy_shared::{OcwMessageReshare, EVE_VERIFYING_KEY, MIN_BALANCE, NETWORK_PARENT_KEY};
use entropy_shared::{
OcwMessageReshare, EVE_VERIFYING_KEY, MIN_BALANCE, NETWORK_PARENT_KEY,
TEST_RESHARE_BLOCK_NUMBER,
};
use entropy_testing_utils::{
constants::{ALICE_STASH_ADDRESS, RANDOM_ACCOUNT},
substrate_context::{test_node_process_testing_state, testing_context},
test_context_stationary,
};
use futures::future::join_all;
use parity_scale_codec::Encode;
@@ -55,15 +64,18 @@ async fn test_reshare() {
let rpc = get_rpc(&cxt.ws_url).await.unwrap();

let client = reqwest::Client::new();
let block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number + 1;
let mut key_shares_before = vec![];
for port in &validator_ports {
key_shares_before.push(unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), *port).await);
}

setup_for_reshare(&api, &rpc).await;

let block_number = TEST_RESHARE_BLOCK_NUMBER;
let onchain_reshare_request =
OcwMessageReshare { new_signer: alice.public().encode(), block_number };
setup_for_reshare(&api, &rpc).await;

run_to_block(&rpc, block_number + 1).await;

let response_results = join_all(
validator_ports
@@ -86,6 +98,67 @@ async fn test_reshare() {
unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), validator_ports[i]).await
);
}
clean_tests();
}

#[tokio::test]
#[serial]
async fn test_reshare_validation_fail() {
initialize_test_logger().await;
clean_tests();

let dave = AccountKeyring::Dave;
let cxt = test_node_process_testing_state(true).await;
let api = get_api(&cxt.ws_url).await.unwrap();
let rpc = get_rpc(&cxt.ws_url).await.unwrap();
let kv = setup_client().await;

let block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number + 1;
let mut ocw_message = OcwMessageReshare { new_signer: dave.public().encode(), block_number };

let err_stale_data =
validate_new_reshare(&api, &rpc, &ocw_message, &kv).await.map_err(|e| e.to_string());
assert_eq!(err_stale_data, Err("Data is stale".to_string()));

let block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number + 1;
ocw_message.block_number = block_number;
run_to_block(&rpc, block_number + 1).await;

let err_incorrect_data =
validate_new_reshare(&api, &rpc, &ocw_message, &kv).await.map_err(|e| e.to_string());
assert_eq!(err_incorrect_data, Err("Data is not verifiable".to_string()));

    // Manipulate the kvdb so the stored block number triggers the repeated data error
kv.kv().delete(LATEST_BLOCK_NUMBER_RESHARE).await.unwrap();
let reservation = kv.kv().reserve_key(LATEST_BLOCK_NUMBER_RESHARE.to_string()).await.unwrap();
kv.kv().put(reservation, (block_number + 5).to_be_bytes().to_vec()).await.unwrap();

let err_stale_data =
validate_new_reshare(&api, &rpc, &ocw_message, &kv).await.map_err(|e| e.to_string());
assert_eq!(err_stale_data, Err("Data is repeated".to_string()));
clean_tests();
}

#[tokio::test]
#[serial]
async fn test_reshare_validation_fail_not_in_reshare() {
initialize_test_logger().await;
clean_tests();

let alice = AccountKeyring::Alice;
let cxt = test_context_stationary().await;
let api = get_api(&cxt.node_proc.ws_url).await.unwrap();
let rpc = get_rpc(&cxt.node_proc.ws_url).await.unwrap();
let kv = setup_client().await;

let block_number = rpc.chain_get_header(None).await.unwrap().unwrap().number + 1;
let ocw_message = OcwMessageReshare { new_signer: alice.public().encode(), block_number };

run_to_block(&rpc, block_number + 1).await;

let err_not_in_reshare =
validate_new_reshare(&api, &rpc, &ocw_message, &kv).await.map_err(|e| e.to_string());
assert_eq!(err_not_in_reshare, Err("Chain Fetch: Not Currently in a reshare".to_string()));

clean_tests();
}
16 changes: 14 additions & 2 deletions pallets/staking/src/lib.rs
@@ -57,7 +57,9 @@ use sp_staking::SessionIndex;

#[frame_support::pallet]
pub mod pallet {
use entropy_shared::{ValidatorInfo, X25519PublicKey, SIGNING_PARTY_SIZE};
use entropy_shared::{
ValidatorInfo, X25519PublicKey, SIGNING_PARTY_SIZE, TEST_RESHARE_BLOCK_NUMBER,
};
use frame_support::{
dispatch::{DispatchResult, DispatchResultWithPostInfo},
pallet_prelude::*,
@@ -231,12 +233,22 @@
proactive_refresh_keys: self.proactive_refresh_data.1.clone(),
};
ProactiveRefresh::<T>::put(refresh_info);

            // Mocks a signer rotation for the TSS new_reshare tests
if self.mock_signer_rotate {
NextSigners::<T>::put(NextSignerInfo {
next_signers: self.inital_signers.clone(),
confirmations: vec![],
});

ReshareData::<T>::put(ReshareInfo {
// To give enough time for test_reshare setup
block_number: TEST_RESHARE_BLOCK_NUMBER.into(),
// Alice signer public key
new_signer: vec![
212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214,
130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125,
],
})
}
}
}
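As an aside (not part of this commit): the hard-coded vector is Alice's well-known sr25519 public key, the same value test_reshare sends as new_signer. A minimal illustrative check, assuming sp_keyring and SCALE encoding as the TSS tests above already use:

#[test]
fn mock_new_signer_is_alice() {
    use parity_scale_codec::Encode;
    use sp_keyring::AccountKeyring;
    // The genesis mock's hard-coded bytes equal Alice's SCALE-encoded sr25519 public key.
    assert_eq!(
        AccountKeyring::Alice.public().encode(),
        vec![
            212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214,
            130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125,
        ],
    );
}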