From 40c861e5a2400269f51a2e553c18c127f0be917e Mon Sep 17 00:00:00 2001 From: haiyizxx Date: Wed, 17 Jan 2024 13:13:48 -0500 Subject: [PATCH 01/54] fix: use correct key id to generate pub key (#234) * fix: use correct key id to generate pub key * add logging --- ampd/src/commands/register_public_key.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/ampd/src/commands/register_public_key.rs b/ampd/src/commands/register_public_key.rs index e33622a13..93d353cf8 100644 --- a/ampd/src/commands/register_public_key.rs +++ b/ampd/src/commands/register_public_key.rs @@ -8,9 +8,11 @@ use multisig::{ msg::ExecuteMsg, }; use report::ResultCompatExt; +use tracing::info; use crate::commands::{broadcast_tx, worker_pub_key}; use crate::config::Config; +use crate::tofnd::grpc::{MultisigClient, SharableEcdsaClient}; use crate::types::TMAddress; use crate::{handlers, Error, PREFIX}; @@ -19,8 +21,19 @@ pub async fn run(config: Config, state_path: &Path) -> Result, Er let multisig_address = get_multisig_address(&config)?; + let tofnd_config = config.tofnd_config.clone(); + let multisig_key = SharableEcdsaClient::new( + MultisigClient::connect(tofnd_config.party_uid, tofnd_config.url) + .await + .change_context(Error::Connection)?, + ) + .keygen(&multisig_address.to_string()) + .await + .change_context(Error::Tofnd)?; + info!(key_id = multisig_address.to_string(), "keygen successful"); + let msg = serde_json::to_vec(&ExecuteMsg::RegisterPublicKey { - public_key: PublicKey::try_from((KeyType::Ecdsa, pub_key.to_bytes().into())) + public_key: PublicKey::try_from((KeyType::Ecdsa, multisig_key.to_bytes().into())) .change_context(Error::Tofnd)?, }) .expect("register public key msg should serialize"); From ee447b5344006f7a84360b6fab8b8583e5863c38 Mon Sep 17 00:00:00 2001 From: eguajardo Date: Fri, 19 Jan 2024 15:39:14 -0600 Subject: [PATCH 02/54] update contract to split execute and query (#240) --- contracts/multisig/src/contract.rs | 254 +-------------------- contracts/multisig/src/contract/execute.rs | 207 +++++++++++++++++ contracts/multisig/src/contract/query.rs | 35 +++ 3 files changed, 245 insertions(+), 251 deletions(-) create mode 100644 contracts/multisig/src/contract/execute.rs create mode 100644 contracts/multisig/src/contract/query.rs diff --git a/contracts/multisig/src/contract.rs b/contracts/multisig/src/contract.rs index 74bab939d..b1b218ec6 100644 --- a/contracts/multisig/src/contract.rs +++ b/contracts/multisig/src/contract.rs @@ -15,6 +15,9 @@ use crate::{ ContractError, }; +mod execute; +mod query; + #[cfg_attr(not(feature = "library"), entry_point)] pub fn instantiate( deps: DepsMut, @@ -83,219 +86,6 @@ pub fn execute( .map_err(axelar_wasm_std::ContractError::from) } -pub mod execute { - use connection_router::state::ChainName; - use cosmwasm_std::WasmMsg; - - use crate::signing::validate_session_signature; - use crate::state::{load_session_signatures, save_signature}; - use crate::worker_set::WorkerSet; - use crate::{ - key::{KeyTyped, PublicKey, Signature}, - signing::SigningSession, - state::{AUTHORIZED_CALLERS, PUB_KEYS}, - }; - use error_stack::ResultExt; - - use super::*; - - pub fn start_signing_session( - deps: DepsMut, - worker_set_id: String, - msg: MsgToSign, - chain_name: ChainName, - ) -> Result { - let worker_set = get_worker_set(deps.storage, &worker_set_id)?; - - let session_id = SIGNING_SESSION_COUNTER.update( - deps.storage, - |mut counter| -> Result { - counter += Uint64::one(); - Ok(counter) - }, - )?; - - let signing_session = 
SigningSession::new(session_id, worker_set_id.clone(), msg.clone()); - - SIGNING_SESSIONS.save(deps.storage, session_id.into(), &signing_session)?; - - let event = Event::SigningStarted { - session_id, - worker_set_id, - pub_keys: worker_set.get_pub_keys(), - msg, - chain_name, - }; - - Ok(Response::new() - .set_data(to_binary(&session_id)?) - .add_event(event.into())) - } - - pub fn submit_signature( - deps: DepsMut, - env: Env, - info: MessageInfo, - session_id: Uint64, - signature: HexBinary, - ) -> Result { - let config = CONFIG.load(deps.storage)?; - let mut session = SIGNING_SESSIONS - .load(deps.storage, session_id.into()) - .map_err(|_| ContractError::SigningSessionNotFound { session_id })?; - let worker_set = WORKER_SETS.load(deps.storage, &session.worker_set_id)?; - - let pub_key = match worker_set.signers.get(&info.sender.to_string()) { - Some(signer) => Ok(&signer.pub_key), - None => Err(ContractError::NotAParticipant { - session_id, - signer: info.sender.to_string(), - }), - }?; - - let signature: Signature = (pub_key.key_type(), signature).try_into()?; - - validate_session_signature( - &session, - &info.sender, - &signature, - pub_key, - config.grace_period, - env.block.height, - )?; - let signature = save_signature(deps.storage, session_id, signature, &info.sender)?; - - let signatures = load_session_signatures(deps.storage, session_id.u64())?; - - let old_state = session.state.clone(); - - session.recalculate_session_state(&signatures, &worker_set, env.block.height); - SIGNING_SESSIONS.save(deps.storage, session.id.u64(), &session)?; - - let state_changed = old_state != session.state; - - signing_response( - session_id, - session.state, - state_changed, - info.sender, - signature, - config.rewards_contract.into_string(), - ) - } - - pub fn register_worker_set( - deps: DepsMut, - worker_set: WorkerSet, - ) -> Result { - let worker_set_id = worker_set.id(); - WORKER_SETS.save(deps.storage, &worker_set_id, &worker_set)?; - - Ok(Response::default()) - } - - pub fn register_pub_key( - deps: DepsMut, - info: MessageInfo, - public_key: PublicKey, - ) -> Result { - PUB_KEYS.save( - deps.storage, - (info.sender.clone(), public_key.key_type()), - &public_key.clone().into(), - )?; - - Ok(Response::new().add_event( - Event::PublicKeyRegistered { - worker: info.sender, - public_key, - } - .into(), - )) - } - - pub fn require_authorized_caller( - deps: &DepsMut, - contract_address: Addr, - ) -> error_stack::Result<(), ContractError> { - AUTHORIZED_CALLERS - .load(deps.storage, &contract_address) - .change_context(ContractError::Unauthorized) - } - - pub fn authorize_caller( - deps: DepsMut, - contract_address: Addr, - ) -> Result { - AUTHORIZED_CALLERS.save(deps.storage, &contract_address, &())?; - - Ok(Response::new().add_event(Event::CallerAuthorized { contract_address }.into())) - } - - pub fn unauthorize_caller( - deps: DepsMut, - contract_address: Addr, - ) -> Result { - AUTHORIZED_CALLERS.remove(deps.storage, &contract_address); - - Ok(Response::new().add_event(Event::CallerUnauthorized { contract_address }.into())) - } - - pub fn require_governance(deps: &DepsMut, sender: Addr) -> Result<(), ContractError> { - let config = CONFIG.load(deps.storage)?; - if config.governance != sender { - return Err(ContractError::Unauthorized); - } - Ok(()) - } - - fn signing_response( - session_id: Uint64, - session_state: MultisigState, - state_changed: bool, - signer: Addr, - signature: Signature, - rewards_contract: String, - ) -> Result { - let rewards_msg = WasmMsg::Execute { - contract_addr: 
rewards_contract, - msg: to_binary(&rewards::msg::ExecuteMsg::RecordParticipation { - event_id: session_id - .to_string() - .try_into() - .expect("couldn't convert session_id to nonempty string"), - worker_address: signer.to_string(), - })?, - funds: vec![], - }; - - let event = Event::SignatureSubmitted { - session_id, - participant: signer, - signature, - }; - - let mut response = Response::new() - .add_message(rewards_msg) - .add_event(event.into()); - - if let MultisigState::Completed { completed_at } = session_state { - if state_changed { - // only send event if state changed - response = response.add_event( - Event::SigningCompleted { - session_id, - completed_at, - } - .into(), - ) - } - } - - Ok(response) - } -} - #[cfg_attr(not(feature = "library"), entry_point)] pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult { match msg { @@ -314,44 +104,6 @@ pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult { } } -pub mod query { - use crate::{ - key::{KeyType, PublicKey}, - state::{load_session_signatures, PUB_KEYS}, - worker_set::WorkerSet, - }; - - use super::*; - - pub fn get_multisig(deps: Deps, session_id: Uint64) -> StdResult { - let session = SIGNING_SESSIONS.load(deps.storage, session_id.into())?; - - let worker_set = WORKER_SETS.load(deps.storage, &session.worker_set_id)?; - let signatures = load_session_signatures(deps.storage, session.id.u64())?; - - let signers_with_sigs = worker_set - .signers - .into_iter() - .map(|(address, signer)| (signer, signatures.get(&address).cloned())) - .collect::>(); - - Ok(Multisig { - state: session.state, - quorum: worker_set.threshold, - signers: signers_with_sigs, - }) - } - - pub fn get_worker_set(deps: Deps, worker_set_id: String) -> StdResult { - WORKER_SETS.load(deps.storage, &worker_set_id) - } - - pub fn get_public_key(deps: Deps, worker: Addr, key_type: KeyType) -> StdResult { - let raw = PUB_KEYS.load(deps.storage, (worker, key_type))?; - Ok(PublicKey::try_from((key_type, raw)).expect("could not decode pub key")) - } -} - #[cfg(test)] mod tests { use std::vec; diff --git a/contracts/multisig/src/contract/execute.rs b/contracts/multisig/src/contract/execute.rs new file mode 100644 index 000000000..12f6f01dd --- /dev/null +++ b/contracts/multisig/src/contract/execute.rs @@ -0,0 +1,207 @@ +use connection_router::state::ChainName; +use cosmwasm_std::WasmMsg; + +use crate::signing::validate_session_signature; +use crate::state::{load_session_signatures, save_signature}; +use crate::worker_set::WorkerSet; +use crate::{ + key::{KeyTyped, PublicKey, Signature}, + signing::SigningSession, + state::{AUTHORIZED_CALLERS, PUB_KEYS}, +}; +use error_stack::ResultExt; + +use super::*; + +pub fn start_signing_session( + deps: DepsMut, + worker_set_id: String, + msg: MsgToSign, + chain_name: ChainName, +) -> Result { + let worker_set = get_worker_set(deps.storage, &worker_set_id)?; + + let session_id = SIGNING_SESSION_COUNTER.update( + deps.storage, + |mut counter| -> Result { + counter += Uint64::one(); + Ok(counter) + }, + )?; + + let signing_session = SigningSession::new(session_id, worker_set_id.clone(), msg.clone()); + + SIGNING_SESSIONS.save(deps.storage, session_id.into(), &signing_session)?; + + let event = Event::SigningStarted { + session_id, + worker_set_id, + pub_keys: worker_set.get_pub_keys(), + msg, + chain_name, + }; + + Ok(Response::new() + .set_data(to_binary(&session_id)?) 
+ .add_event(event.into())) +} + +pub fn submit_signature( + deps: DepsMut, + env: Env, + info: MessageInfo, + session_id: Uint64, + signature: HexBinary, +) -> Result { + let config = CONFIG.load(deps.storage)?; + let mut session = SIGNING_SESSIONS + .load(deps.storage, session_id.into()) + .map_err(|_| ContractError::SigningSessionNotFound { session_id })?; + let worker_set = WORKER_SETS.load(deps.storage, &session.worker_set_id)?; + + let pub_key = match worker_set.signers.get(&info.sender.to_string()) { + Some(signer) => Ok(&signer.pub_key), + None => Err(ContractError::NotAParticipant { + session_id, + signer: info.sender.to_string(), + }), + }?; + + let signature: Signature = (pub_key.key_type(), signature).try_into()?; + + validate_session_signature( + &session, + &info.sender, + &signature, + pub_key, + config.grace_period, + env.block.height, + )?; + let signature = save_signature(deps.storage, session_id, signature, &info.sender)?; + + let signatures = load_session_signatures(deps.storage, session_id.u64())?; + + let old_state = session.state.clone(); + + session.recalculate_session_state(&signatures, &worker_set, env.block.height); + SIGNING_SESSIONS.save(deps.storage, session.id.u64(), &session)?; + + let state_changed = old_state != session.state; + + signing_response( + session_id, + session.state, + state_changed, + info.sender, + signature, + config.rewards_contract.into_string(), + ) +} + +pub fn register_worker_set( + deps: DepsMut, + worker_set: WorkerSet, +) -> Result { + let worker_set_id = worker_set.id(); + WORKER_SETS.save(deps.storage, &worker_set_id, &worker_set)?; + + Ok(Response::default()) +} + +pub fn register_pub_key( + deps: DepsMut, + info: MessageInfo, + public_key: PublicKey, +) -> Result { + PUB_KEYS.save( + deps.storage, + (info.sender.clone(), public_key.key_type()), + &public_key.clone().into(), + )?; + + Ok(Response::new().add_event( + Event::PublicKeyRegistered { + worker: info.sender, + public_key, + } + .into(), + )) +} + +pub fn require_authorized_caller( + deps: &DepsMut, + contract_address: Addr, +) -> error_stack::Result<(), ContractError> { + AUTHORIZED_CALLERS + .load(deps.storage, &contract_address) + .change_context(ContractError::Unauthorized) +} + +pub fn authorize_caller(deps: DepsMut, contract_address: Addr) -> Result { + AUTHORIZED_CALLERS.save(deps.storage, &contract_address, &())?; + + Ok(Response::new().add_event(Event::CallerAuthorized { contract_address }.into())) +} + +pub fn unauthorize_caller( + deps: DepsMut, + contract_address: Addr, +) -> Result { + AUTHORIZED_CALLERS.remove(deps.storage, &contract_address); + + Ok(Response::new().add_event(Event::CallerUnauthorized { contract_address }.into())) +} + +pub fn require_governance(deps: &DepsMut, sender: Addr) -> Result<(), ContractError> { + let config = CONFIG.load(deps.storage)?; + if config.governance != sender { + return Err(ContractError::Unauthorized); + } + Ok(()) +} + +fn signing_response( + session_id: Uint64, + session_state: MultisigState, + state_changed: bool, + signer: Addr, + signature: Signature, + rewards_contract: String, +) -> Result { + let rewards_msg = WasmMsg::Execute { + contract_addr: rewards_contract, + msg: to_binary(&rewards::msg::ExecuteMsg::RecordParticipation { + event_id: session_id + .to_string() + .try_into() + .expect("couldn't convert session_id to nonempty string"), + worker_address: signer.to_string(), + })?, + funds: vec![], + }; + + let event = Event::SignatureSubmitted { + session_id, + participant: signer, + signature, + }; + + let mut 
response = Response::new() + .add_message(rewards_msg) + .add_event(event.into()); + + if let MultisigState::Completed { completed_at } = session_state { + if state_changed { + // only send event if state changed + response = response.add_event( + Event::SigningCompleted { + session_id, + completed_at, + } + .into(), + ) + } + } + + Ok(response) +} diff --git a/contracts/multisig/src/contract/query.rs b/contracts/multisig/src/contract/query.rs new file mode 100644 index 000000000..1f6b51fec --- /dev/null +++ b/contracts/multisig/src/contract/query.rs @@ -0,0 +1,35 @@ +use crate::{ + key::{KeyType, PublicKey}, + state::{load_session_signatures, PUB_KEYS}, + worker_set::WorkerSet, +}; + +use super::*; + +pub fn get_multisig(deps: Deps, session_id: Uint64) -> StdResult { + let session = SIGNING_SESSIONS.load(deps.storage, session_id.into())?; + + let worker_set = WORKER_SETS.load(deps.storage, &session.worker_set_id)?; + let signatures = load_session_signatures(deps.storage, session.id.u64())?; + + let signers_with_sigs = worker_set + .signers + .into_iter() + .map(|(address, signer)| (signer, signatures.get(&address).cloned())) + .collect::>(); + + Ok(Multisig { + state: session.state, + quorum: worker_set.threshold, + signers: signers_with_sigs, + }) +} + +pub fn get_worker_set(deps: Deps, worker_set_id: String) -> StdResult { + WORKER_SETS.load(deps.storage, &worker_set_id) +} + +pub fn get_public_key(deps: Deps, worker: Addr, key_type: KeyType) -> StdResult { + let raw = PUB_KEYS.load(deps.storage, (worker, key_type))?; + Ok(PublicKey::try_from((key_type, raw)).expect("could not decode pub key")) +} From dec716e43a8955bb64bb820a56dca7478395f20e Mon Sep 17 00:00:00 2001 From: Houmaan Chamani Date: Tue, 23 Jan 2024 18:36:50 -0500 Subject: [PATCH 03/54] feat: add expiry field of u64 to SigningStarted event (#243) * feat: add expiry field of u64 to SigningStarted event * fix: correct a comment word on contracts/multisig/src/state.rs Co-authored-by: Sammy * fix: update attribute name of expiry field Co-authored-by: Sammy --------- Co-authored-by: Sammy --- ampd/src/handlers/multisig.rs | 1 + contracts/multisig-prover/src/test/multicontract.rs | 3 +++ contracts/multisig/src/contract.rs | 5 +++++ contracts/multisig/src/contract/execute.rs | 5 +++++ contracts/multisig/src/events.rs | 5 ++++- contracts/multisig/src/msg.rs | 1 + contracts/multisig/src/state.rs | 1 + integration-tests/tests/test_utils/mod.rs | 3 +++ 8 files changed, 23 insertions(+), 1 deletion(-) diff --git a/ampd/src/handlers/multisig.rs b/ampd/src/handlers/multisig.rs index 8c1d469b9..22a4d9556 100644 --- a/ampd/src/handlers/multisig.rs +++ b/ampd/src/handlers/multisig.rs @@ -243,6 +243,7 @@ mod test { pub_keys, msg: MsgToSign::unchecked(rand_message()), chain_name: rand_chain_name(), + expires_at: 100u64, }; let mut event: cosmwasm_std::Event = poll_started.into(); diff --git a/contracts/multisig-prover/src/test/multicontract.rs b/contracts/multisig-prover/src/test/multicontract.rs index 8e23b2179..7372c92fb 100644 --- a/contracts/multisig-prover/src/test/multicontract.rs +++ b/contracts/multisig-prover/src/test/multicontract.rs @@ -6,6 +6,8 @@ use super::{mocks, test_data}; pub const INSTANTIATOR: &str = "instantiator"; pub const RELAYER: &str = "relayer"; +pub const SIGNATURE_BLOCK_EXPIRY: u64 = 100; + pub struct TestCaseConfig { pub app: App, pub admin: Addr, @@ -45,6 +47,7 @@ fn instantiate_mock_multisig(app: &mut App) -> Addr { let msg = multisig::msg::InstantiateMsg { governance_address: "governance".parse().unwrap(), 
rewards_address: "rewards".to_string(), + block_expiry: SIGNATURE_BLOCK_EXPIRY, grace_period: 2, }; diff --git a/contracts/multisig/src/contract.rs b/contracts/multisig/src/contract.rs index b1b218ec6..891045eda 100644 --- a/contracts/multisig/src/contract.rs +++ b/contracts/multisig/src/contract.rs @@ -28,6 +28,7 @@ pub fn instantiate( let config = Config { governance: deps.api.addr_validate(&msg.governance_address)?, rewards_contract: deps.api.addr_validate(&msg.rewards_address)?, + block_expiry: msg.block_expiry, grace_period: msg.grace_period, }; CONFIG.save(deps.storage, &config)?; @@ -58,6 +59,7 @@ pub fn execute( .transpose()?; // TODO: handle callback execute::start_signing_session( deps, + env, worker_set_id, msg.try_into() .map_err(axelar_wasm_std::ContractError::from)?, @@ -131,6 +133,8 @@ mod tests { const PROVER: &str = "prover"; const REWARDS_CONTRACT: &str = "rewards"; + const SIGNATURE_BLOCK_EXPIRY: u64 = 100; + fn do_instantiate(deps: DepsMut) -> Result { let info = mock_info(INSTANTIATOR, &[]); let env = mock_env(); @@ -138,6 +142,7 @@ mod tests { let msg = InstantiateMsg { governance_address: "governance".parse().unwrap(), rewards_address: REWARDS_CONTRACT.to_string(), + block_expiry: SIGNATURE_BLOCK_EXPIRY, grace_period: 2, }; diff --git a/contracts/multisig/src/contract/execute.rs b/contracts/multisig/src/contract/execute.rs index 12f6f01dd..7167473da 100644 --- a/contracts/multisig/src/contract/execute.rs +++ b/contracts/multisig/src/contract/execute.rs @@ -15,10 +15,12 @@ use super::*; pub fn start_signing_session( deps: DepsMut, + env: Env, worker_set_id: String, msg: MsgToSign, chain_name: ChainName, ) -> Result { + let config = CONFIG.load(deps.storage)?; let worker_set = get_worker_set(deps.storage, &worker_set_id)?; let session_id = SIGNING_SESSION_COUNTER.update( @@ -33,12 +35,15 @@ pub fn start_signing_session( SIGNING_SESSIONS.save(deps.storage, session_id.into(), &signing_session)?; + let expires_at = env.block.height + config.block_expiry; + let event = Event::SigningStarted { session_id, worker_set_id, pub_keys: worker_set.get_pub_keys(), msg, chain_name, + expires_at, }; Ok(Response::new() diff --git a/contracts/multisig/src/events.rs b/contracts/multisig/src/events.rs index 4df66a2ac..3a9e08290 100644 --- a/contracts/multisig/src/events.rs +++ b/contracts/multisig/src/events.rs @@ -17,6 +17,7 @@ pub enum Event { pub_keys: HashMap, msg: MsgToSign, chain_name: ChainName, + expires_at: u64, }, // Emitted when a participants submits a signature SignatureSubmitted { @@ -50,6 +51,7 @@ impl From for cosmwasm_std::Event { pub_keys, msg, chain_name: chain, + expires_at, } => cosmwasm_std::Event::new("signing_started") .add_attribute("session_id", session_id) .add_attribute("worker_set_id", worker_set_id) @@ -59,7 +61,8 @@ impl From for cosmwasm_std::Event { .expect("violated invariant: pub_keys are not serializable"), ) .add_attribute("msg", HexBinary::from(msg).to_hex()) - .add_attribute("chain", chain), + .add_attribute("chain", chain) + .add_attribute("expires_at", expires_at.to_string()), Event::SignatureSubmitted { session_id, participant, diff --git a/contracts/multisig/src/msg.rs b/contracts/multisig/src/msg.rs index 6b444d7bf..57e9c975f 100644 --- a/contracts/multisig/src/msg.rs +++ b/contracts/multisig/src/msg.rs @@ -13,6 +13,7 @@ pub struct InstantiateMsg { // the governance address is allowed to modify the authorized caller list for this contract pub governance_address: String, pub rewards_address: String, + pub block_expiry: u64, pub grace_period: 
u64, // in blocks after session has been completed } diff --git a/contracts/multisig/src/state.rs b/contracts/multisig/src/state.rs index 25f945ce2..6a409b839 100644 --- a/contracts/multisig/src/state.rs +++ b/contracts/multisig/src/state.rs @@ -15,6 +15,7 @@ use crate::{ pub struct Config { pub governance: Addr, pub rewards_contract: Addr, + pub block_expiry: u64, // number of blocks after which a signing session expires pub grace_period: u64, // TODO: add update mechanism to change this after instantiation } diff --git a/integration-tests/tests/test_utils/mod.rs b/integration-tests/tests/test_utils/mod.rs index 32238e605..d1fc7266f 100644 --- a/integration-tests/tests/test_utils/mod.rs +++ b/integration-tests/tests/test_utils/mod.rs @@ -20,6 +20,8 @@ use tofn::ecdsa::KeyPair; pub const AXL_DENOMINATION: &str = "uaxl"; +pub const SIGNATURE_BLOCK_EXPIRY: u64 = 100; + fn get_event_attribute<'a>( events: &'a [Event], event_type: &str, @@ -335,6 +337,7 @@ pub fn setup_protocol(service_name: nonempty::String) -> Protocol { multisig::msg::InstantiateMsg { rewards_address: rewards_address.to_string(), governance_address: governance_address.to_string(), + block_expiry: SIGNATURE_BLOCK_EXPIRY, grace_period: 2, }, ); From 4197d825dcfdfc357db3b1b09ee882c6c54ab34b Mon Sep 17 00:00:00 2001 From: Houmaan Chamani Date: Wed, 24 Jan 2024 10:08:22 -0500 Subject: [PATCH 04/54] docs: axe-2947-update-ampd-docs (#236) * Update connection_router.md: add note for the second diagram * Update connection_router.md: include brief explanation of main entities * Update overview.md: update Message struct * docs: edit notes on connection_router.md, add CrossChainId struct to overview.md * fix: remove entity structure from connection_router.md --- doc/src/contracts/connection_router.md | 12 +++++++++--- doc/src/overview.md | 17 +++++++++++------ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/doc/src/contracts/connection_router.md b/doc/src/contracts/connection_router.md index 0b2aa161b..319756c57 100644 --- a/doc/src/contracts/connection_router.md +++ b/doc/src/contracts/connection_router.md @@ -88,6 +88,9 @@ pub struct MessageRouted { } ``` + + + ## Connection Router graph ```mermaid @@ -135,12 +138,15 @@ Relayer A->>+Gateway A: gateway::ExecuteMsg::RouteMessages Gateway A->>+Connection Router: connection_router::ExecuteMsg::RouteMessages Connection Router->>+Gateway B: gateway::ExecuteMsg::RouteMessages Multisig Prover->>+Relayer B: constructs proof -Relayer B->>+External Gateway B: +Relayer B->>+External Gateway B: sends proof ``` 1. The External Gateway emits an event that is picked up by the Relayer. 2. Relayer relays the event to the Gateway as a message. 3. Gateway receives the incoming messages, verifies the messages, and then passes the messages to the Connection Router. -4. Connection Router sends outgoing messages to the destination Gateway. +4. Connection router sends the outgoing messages from Gateway A to Gateway B, which is the representation of destination chain on Axelar chain. 5. The Multisig Prover takes the messages stored in the destination Gateway and constructs a proof. -6. The Relayer sends the proof to the destination's External Gateway. \ No newline at end of file +6. The Relayer sends the proof, which also contains messages, to the destination's External Gateway. + +### Notes +1. External Gateways are deployed on blockchains other than Axelar, such as Ethereum and Avalanche, while internal gateways reside on Axelar's chain. 
diff --git a/doc/src/overview.md b/doc/src/overview.md index 704126501..ed8e9bb01 100644 --- a/doc/src/overview.md +++ b/doc/src/overview.md @@ -4,12 +4,17 @@ Structure of a routing packet (`M` in the diagrams) ```rust struct Message { - id: String, //should be formatted as [source_chain]:[unique identifier], i.e. Ethereum:0x74ac0205b1f8f51023942856145182f0e6fdd41ccb2c8058bf2d89fc67564d56:0 - source_address: String, - source_chain: String, - destination_address: String, - destination_chain: String, - payload_hash: HexBinary + cc_id: CrossChainId, + source_address: Address, // String + destination_chain: ChainName, // String + destination_address: Address, + payload_hash: [u8; 32] + } + + // Combination of chain name and a unique identifier + pub struct CrossChainId { + pub chain: ChainName, + pub id: nonempty::String, } ``` From aac6602dba590083ac81e0630baa32cfb9876082 Mon Sep 17 00:00:00 2001 From: Christian Gorenflo Date: Wed, 24 Jan 2024 19:01:06 -0500 Subject: [PATCH 05/54] feat: add message ids to proof under construction event (#224) --- contracts/multisig-prover/src/events.rs | 54 +++++++++++++++++++++--- contracts/multisig-prover/src/reply.rs | 7 ++++ packages/axelar-wasm-std/src/event.rs | 55 +++++++++++++++++++++++++ packages/axelar-wasm-std/src/lib.rs | 1 + 4 files changed, 112 insertions(+), 5 deletions(-) create mode 100644 packages/axelar-wasm-std/src/event.rs diff --git a/contracts/multisig-prover/src/events.rs b/contracts/multisig-prover/src/events.rs index 1289f3eba..23f1c1698 100644 --- a/contracts/multisig-prover/src/events.rs +++ b/contracts/multisig-prover/src/events.rs @@ -1,12 +1,14 @@ -use cosmwasm_std::Uint64; -use serde_json::to_string; - use crate::types::BatchId; +use axelar_wasm_std::event; +use connection_router::state::{ChainName, CrossChainId}; +use cosmwasm_std::Uint64; pub enum Event { ProofUnderConstruction { + destination_chain: ChainName, command_batch_id: BatchId, multisig_session_id: Uint64, + msg_ids: Vec, }, } @@ -14,19 +16,61 @@ impl From for cosmwasm_std::Event { fn from(other: Event) -> Self { match other { Event::ProofUnderConstruction { + destination_chain, command_batch_id, multisig_session_id, + msg_ids, } => cosmwasm_std::Event::new("proof_under_construction") + .add_attribute( + "destination_chain", + event::attribute_value(&destination_chain) + .expect("violated invariant: destination_chain is not serializable"), + ) .add_attribute( "command_batch_id", - to_string(&command_batch_id) + event::attribute_value(&command_batch_id) .expect("violated invariant: command_batch_id is not serializable"), ) .add_attribute( "multisig_session_id", - to_string(&multisig_session_id) + event::attribute_value(&multisig_session_id) .expect("violated invariant: multisig_session_id is not serializable"), + ) + .add_attribute( + "message_ids", + event::attribute_value(&msg_ids) + .expect("violated invariant: message_ids is not serializable"), ), } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::BatchId; + use serde_json::to_string; + + #[test] + fn proof_under_construction_is_serializable() { + let msg_ids = vec![ + CrossChainId { + chain: "ethereum".parse().unwrap(), + id: "some_id".try_into().unwrap(), + }, + CrossChainId { + chain: "fantom".parse().unwrap(), + id: "some_other_id".try_into().unwrap(), + }, + ]; + + let event = Event::ProofUnderConstruction { + destination_chain: "avalanche".parse().unwrap(), + command_batch_id: BatchId::new(&msg_ids, None), + multisig_session_id: Uint64::new(2), + msg_ids, + }; + + 
assert!(to_string(&cosmwasm_std::Event::from(event)).is_ok()); + } +} diff --git a/contracts/multisig-prover/src/reply.rs b/contracts/multisig-prover/src/reply.rs index 86d98850f..d7a123f59 100644 --- a/contracts/multisig-prover/src/reply.rs +++ b/contracts/multisig-prover/src/reply.rs @@ -1,6 +1,7 @@ use cosmwasm_std::{from_binary, DepsMut, Reply, Response, Uint64}; use cw_utils::{parse_reply_execute_data, MsgExecuteContractResponse}; +use crate::state::{COMMANDS_BATCH, CONFIG}; use crate::{ error::ContractError, events::Event, @@ -8,6 +9,8 @@ use crate::{ }; pub fn start_multisig_reply(deps: DepsMut, reply: Reply) -> Result { + let config = CONFIG.load(deps.storage)?; + match parse_reply_execute_data(reply) { Ok(MsgExecuteContractResponse { data: Some(data) }) => { let command_batch_id = REPLY_BATCH.load(deps.storage)?; @@ -25,6 +28,10 @@ pub fn start_multisig_reply(deps: DepsMut, reply: Reply) -> Result(v: &T) -> Result +where + T: ?Sized + Serialize, +{ + let json = to_string(v)?; + // using strip_* instead of trim_matches because the latter trims any number of quotes instead of just one + let json = json + .strip_prefix('"') + .unwrap_or(&json) + .strip_suffix('"') + .unwrap_or(&json); + Ok(json.to_string()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Serialize)] + struct Foo { + s: String, + } + + #[test] + fn attribute_value_without_quotes() { + let foo = Foo { + s: "hello".to_string(), + }; + assert_eq!(attribute_value(&foo).unwrap(), r#"{"s":"hello"}"#); + } + + #[test] + fn attribute_value_with_quotes() { + let foo = Foo { + s: "\"hello\"".to_string(), + }; + assert_eq!(attribute_value(&foo).unwrap(), r#"{"s":"\"hello\""}"#); + } + + #[test] + fn attribute_value_with_integer() { + assert_eq!(attribute_value(&1).unwrap(), "1"); + } + + #[test] + fn attribute_value_with_vector() { + assert_eq!( + attribute_value(vec![1.0, 2.4, 0.72].as_slice()).unwrap(), + "[1.0,2.4,0.72]" + ); + } +} diff --git a/packages/axelar-wasm-std/src/lib.rs b/packages/axelar-wasm-std/src/lib.rs index dc7b9af63..6cdff67f9 100644 --- a/packages/axelar-wasm-std/src/lib.rs +++ b/packages/axelar-wasm-std/src/lib.rs @@ -7,6 +7,7 @@ pub use crate::{ pub mod counter; mod error; +pub mod event; pub mod flagset; mod fn_ext; pub mod hash; From 196416d7a4faca832cc6d5f58db1b86127becc68 Mon Sep 17 00:00:00 2001 From: Houmaan Chamani Date: Thu, 25 Jan 2024 12:58:40 -0500 Subject: [PATCH 06/54] chore: work in progress - removing grace from multisig config (#245) * chore: work in progress - removing grace from multisig config * fix: remove grace period from contract.rs, update env block change to reflect test parameters * fix: remove "before expiry" from comment Co-authored-by: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> --------- Co-authored-by: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> --- .../multisig-prover/src/test/multicontract.rs | 1 - contracts/multisig/src/contract.rs | 14 +++--- contracts/multisig/src/contract/execute.rs | 8 ++-- contracts/multisig/src/msg.rs | 1 - contracts/multisig/src/signing.rs | 45 +++++++------------ contracts/multisig/src/state.rs | 1 - integration-tests/tests/test_utils/mod.rs | 1 - 7 files changed, 27 insertions(+), 44 deletions(-) diff --git a/contracts/multisig-prover/src/test/multicontract.rs b/contracts/multisig-prover/src/test/multicontract.rs index 7372c92fb..e37a2a36b 100644 --- a/contracts/multisig-prover/src/test/multicontract.rs +++ b/contracts/multisig-prover/src/test/multicontract.rs @@ -48,7 +48,6 @@ fn instantiate_mock_multisig(app: &mut 
App) -> Addr { governance_address: "governance".parse().unwrap(), rewards_address: "rewards".to_string(), block_expiry: SIGNATURE_BLOCK_EXPIRY, - grace_period: 2, }; app.instantiate_contract( diff --git a/contracts/multisig/src/contract.rs b/contracts/multisig/src/contract.rs index 891045eda..084df4ab4 100644 --- a/contracts/multisig/src/contract.rs +++ b/contracts/multisig/src/contract.rs @@ -29,7 +29,6 @@ pub fn instantiate( governance: deps.api.addr_validate(&msg.governance_address)?, rewards_contract: deps.api.addr_validate(&msg.rewards_address)?, block_expiry: msg.block_expiry, - grace_period: msg.grace_period, }; CONFIG.save(deps.storage, &config)?; @@ -143,7 +142,6 @@ mod tests { governance_address: "governance".parse().unwrap(), rewards_address: REWARDS_CONTRACT.to_string(), block_expiry: SIGNATURE_BLOCK_EXPIRY, - grace_period: 2, }; instantiate(deps, env, info, msg) @@ -547,7 +545,7 @@ mod tests { } #[test] - fn submit_signature_during_grace_period() { + fn submit_signature_before_expiry() { let (mut deps, ecdsa_subkey, ed25519_subkey) = setup(); do_authorize_caller(deps.as_mut(), Addr::unchecked(PROVER)).unwrap(); @@ -563,7 +561,7 @@ mod tests { let signer = signers.get(1).unwrap().to_owned(); do_sign(deps.as_mut(), mock_env(), session_id, &signer).unwrap(); - // third signature, grace period + // third signature let signer = signers.get(2).unwrap().to_owned(); let expected_rewards_msg = WasmMsg::Execute { @@ -588,12 +586,12 @@ mod tests { assert!(!res .events .iter() - .any(|e| e.ty == "signing_completed".to_string())); // event is not re-emitted during grace period + .any(|e| e.ty == "signing_completed".to_string())); // event is not re-emitted } } #[test] - fn submit_signature_grace_period_over() { + fn submit_signature_after_expiry() { let (mut deps, ecdsa_subkey, ed25519_subkey) = setup(); do_authorize_caller(deps.as_mut(), Addr::unchecked(PROVER)).unwrap(); @@ -609,10 +607,10 @@ mod tests { let signer = signers.get(1).unwrap().to_owned(); do_sign(deps.as_mut(), mock_env(), session_id, &signer).unwrap(); - // third signature, grace period over + // third signature, expiration block passed let signer = signers.get(2).unwrap().to_owned(); let mut env = mock_env(); - env.block.height += 10; + env.block.height += 101; let res = do_sign(deps.as_mut(), env, session_id, &signer); assert_eq!( diff --git a/contracts/multisig/src/contract/execute.rs b/contracts/multisig/src/contract/execute.rs index 7167473da..5e4ca0314 100644 --- a/contracts/multisig/src/contract/execute.rs +++ b/contracts/multisig/src/contract/execute.rs @@ -31,11 +31,12 @@ pub fn start_signing_session( }, )?; - let signing_session = SigningSession::new(session_id, worker_set_id.clone(), msg.clone()); + let expires_at = env.block.height + config.block_expiry; - SIGNING_SESSIONS.save(deps.storage, session_id.into(), &signing_session)?; + let signing_session = + SigningSession::new(session_id, worker_set_id.clone(), msg.clone(), expires_at); - let expires_at = env.block.height + config.block_expiry; + SIGNING_SESSIONS.save(deps.storage, session_id.into(), &signing_session)?; let event = Event::SigningStarted { session_id, @@ -79,7 +80,6 @@ pub fn submit_signature( &info.sender, &signature, pub_key, - config.grace_period, env.block.height, )?; let signature = save_signature(deps.storage, session_id, signature, &info.sender)?; diff --git a/contracts/multisig/src/msg.rs b/contracts/multisig/src/msg.rs index 57e9c975f..a9f2a8a31 100644 --- a/contracts/multisig/src/msg.rs +++ b/contracts/multisig/src/msg.rs @@ -14,7 +14,6 
@@ pub struct InstantiateMsg { pub governance_address: String, pub rewards_address: String, pub block_expiry: u64, - pub grace_period: u64, // in blocks after session has been completed } #[cw_serde] diff --git a/contracts/multisig/src/signing.rs b/contracts/multisig/src/signing.rs index 9ce2b7ef1..6a361bde8 100644 --- a/contracts/multisig/src/signing.rs +++ b/contracts/multisig/src/signing.rs @@ -16,15 +16,17 @@ pub struct SigningSession { pub worker_set_id: String, pub msg: MsgToSign, pub state: MultisigState, + pub expires_at: u64, } impl SigningSession { - pub fn new(session_id: Uint64, worker_set_id: String, msg: MsgToSign) -> Self { + pub fn new(session_id: Uint64, worker_set_id: String, msg: MsgToSign, expires_at: u64) -> Self { Self { id: session_id, worker_set_id, msg, state: MultisigState::Pending, + expires_at, } } @@ -49,11 +51,9 @@ pub fn validate_session_signature( signer: &Addr, signature: &Signature, pub_key: &PublicKey, - grace_period: u64, block_height: u64, ) -> Result<(), ContractError> { - if matches!(session.state, MultisigState::Completed { completed_at } if completed_at + grace_period < block_height) - { + if session.expires_at < block_height { return Err(ContractError::SigningSessionClosed { session_id: session.id, }); @@ -112,7 +112,9 @@ mod tests { let worker_set = build_worker_set(KeyType::Ecdsa, &signers); let message: MsgToSign = ecdsa_test_data::message().try_into().unwrap(); - let session = SigningSession::new(Uint64::one(), worker_set_id, message.clone()); + let expires_at = 12345; + let session = + SigningSession::new(Uint64::one(), worker_set_id, message.clone(), expires_at); let signatures: HashMap = signers .iter() @@ -143,7 +145,9 @@ mod tests { let worker_set = build_worker_set(key_type, &signers); let message: MsgToSign = ed25519_test_data::message().try_into().unwrap(); - let session = SigningSession::new(Uint64::one(), worker_set_id, message.clone()); + let expires_at = 12345; + let session = + SigningSession::new(Uint64::one(), worker_set_id, message.clone(), expires_at); let signatures: HashMap = signers .iter() @@ -194,31 +198,25 @@ mod tests { let signature = config.signatures.values().next().unwrap(); let pub_key = &worker_set.signers.get(&signer.to_string()).unwrap().pub_key; - assert!( - validate_session_signature(&session, &signer, signature, pub_key, 0, 0).is_ok() - ); + assert!(validate_session_signature(&session, &signer, signature, pub_key, 0).is_ok()); } } #[test] - fn success_validation_grace_period() { + fn success_validation_expiry_not_reached() { for config in [ecdsa_setup(), ed25519_setup()] { let mut session = config.session; let worker_set = config.worker_set; let signer = Addr::unchecked(config.signatures.keys().next().unwrap()); let signature = config.signatures.values().next().unwrap(); - let completed_at = 12345; - let grace_period = 10; - let block_height = completed_at + grace_period; // inclusive + let block_height = 12340; // inclusive let pub_key = &worker_set.signers.get(&signer.to_string()).unwrap().pub_key; - session.state = MultisigState::Completed { completed_at }; assert!(validate_session_signature( &session, &signer, signature, pub_key, - grace_period, block_height ) .is_ok()); @@ -232,20 +230,11 @@ mod tests { let worker_set = config.worker_set; let signer = Addr::unchecked(config.signatures.keys().next().unwrap()); let signature = config.signatures.values().next().unwrap(); - let completed_at = 12345; - let grace_period = 10; - let block_height = completed_at + grace_period + 1; + let block_height = 12346; let 
pub_key = &worker_set.signers.get(&signer.to_string()).unwrap().pub_key; - session.state = MultisigState::Completed { completed_at }; - let result = validate_session_signature( - &session, - &signer, - signature, - pub_key, - grace_period, - block_height, - ); + let result = + validate_session_signature(&session, &signer, signature, pub_key, block_height); assert_eq!( result.unwrap_err(), @@ -273,7 +262,7 @@ mod tests { .try_into() .unwrap(); - let result = validate_session_signature(&session, &signer, &invalid_sig, pub_key, 0, 0); + let result = validate_session_signature(&session, &signer, &invalid_sig, pub_key, 0); assert_eq!( result.unwrap_err(), diff --git a/contracts/multisig/src/state.rs b/contracts/multisig/src/state.rs index 6a409b839..253bdaad1 100644 --- a/contracts/multisig/src/state.rs +++ b/contracts/multisig/src/state.rs @@ -16,7 +16,6 @@ pub struct Config { pub governance: Addr, pub rewards_contract: Addr, pub block_expiry: u64, // number of blocks after which a signing session expires - pub grace_period: u64, // TODO: add update mechanism to change this after instantiation } pub const CONFIG: Item = Item::new("config"); diff --git a/integration-tests/tests/test_utils/mod.rs b/integration-tests/tests/test_utils/mod.rs index d1fc7266f..78e3ae72b 100644 --- a/integration-tests/tests/test_utils/mod.rs +++ b/integration-tests/tests/test_utils/mod.rs @@ -338,7 +338,6 @@ pub fn setup_protocol(service_name: nonempty::String) -> Protocol { rewards_address: rewards_address.to_string(), governance_address: governance_address.to_string(), block_expiry: SIGNATURE_BLOCK_EXPIRY, - grace_period: 2, }, ); let service_registry_address = instantiate_service_registry( From 98430b8b1e98d65be5db32dce0f2ea4b30548b29 Mon Sep 17 00:00:00 2001 From: eguajardo Date: Thu, 25 Jan 2024 12:59:15 -0600 Subject: [PATCH 07/54] feat(voting-verifier): query to check messages consensus (#231) * add query to check message consensus * add query to get worker set consensus and rename functions * update is verified query with statuses query * cleanup and rename * add unit test * review * fix typo * review comments --- contracts/aggregate-verifier/src/contract.rs | 7 +- contracts/aggregate-verifier/src/msg.rs | 5 +- contracts/aggregate-verifier/tests/mock.rs | 14 +- contracts/aggregate-verifier/tests/test.rs | 23 +-- contracts/gateway/src/contract/execute.rs | 79 ++++---- contracts/gateway/src/contract/query.rs | 5 +- contracts/multisig-prover/src/execute.rs | 8 +- .../src/test/mocks/voting_verifier.rs | 8 +- contracts/voting-verifier/src/contract.rs | 10 +- contracts/voting-verifier/src/execute.rs | 43 +++-- contracts/voting-verifier/src/msg.rs | 12 +- contracts/voting-verifier/src/query.rs | 107 ++++------- contracts/voting-verifier/tests/tests.rs | 171 +++++++++++++++--- packages/axelar-wasm-std/src/lib.rs | 2 + packages/axelar-wasm-std/src/verification.rs | 36 ++++ 15 files changed, 342 insertions(+), 188 deletions(-) create mode 100644 packages/axelar-wasm-std/src/verification.rs diff --git a/contracts/aggregate-verifier/src/contract.rs b/contracts/aggregate-verifier/src/contract.rs index 09b1b0ae0..e6c30aebb 100644 --- a/contracts/aggregate-verifier/src/contract.rs +++ b/contracts/aggregate-verifier/src/contract.rs @@ -1,3 +1,4 @@ +use axelar_wasm_std::VerificationStatus; use connection_router::state::CrossChainId; #[cfg(not(feature = "library"))] use cosmwasm_std::entry_point; @@ -78,7 +79,7 @@ pub fn reply( match parse_reply_execute_data(reply) { Ok(MsgExecuteContractResponse { data: Some(data) }) => { // 
check format of data - let _: Vec<(CrossChainId, bool)> = from_binary(&data)?; + let _: Vec<(CrossChainId, VerificationStatus)> = from_binary(&data)?; // only one verifier, so just return the response as is Ok(Response::new().set_data(data)) @@ -97,11 +98,11 @@ pub fn reply( #[cfg_attr(not(feature = "library"), entry_point)] pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult { match msg { - QueryMsg::IsVerified { messages } => { + QueryMsg::GetMessagesStatus { messages } => { let verifier = CONFIG.load(deps.storage)?.verifier; deps.querier.query(&QueryRequest::Wasm(WasmQuery::Smart { contract_addr: verifier.to_string(), - msg: to_binary(&voting_msg::QueryMsg::IsVerified { messages })?, + msg: to_binary(&voting_msg::QueryMsg::GetMessagesStatus { messages })?, })) } } diff --git a/contracts/aggregate-verifier/src/msg.rs b/contracts/aggregate-verifier/src/msg.rs index 2a981fc37..b1c21ff39 100644 --- a/contracts/aggregate-verifier/src/msg.rs +++ b/contracts/aggregate-verifier/src/msg.rs @@ -1,3 +1,4 @@ +use axelar_wasm_std::VerificationStatus; use connection_router::state::Message; use cosmwasm_schema::{cw_serde, QueryResponses}; @@ -15,6 +16,6 @@ pub enum ExecuteMsg { #[cw_serde] #[derive(QueryResponses)] pub enum QueryMsg { - #[returns(Vec<(connection_router::state::CrossChainId, bool)>)] - IsVerified { messages: Vec }, + #[returns(Vec<(connection_router::state::CrossChainId, VerificationStatus)>)] + GetMessagesStatus { messages: Vec }, } diff --git a/contracts/aggregate-verifier/tests/mock.rs b/contracts/aggregate-verifier/tests/mock.rs index f871ce6c3..daf98b99d 100644 --- a/contracts/aggregate-verifier/tests/mock.rs +++ b/contracts/aggregate-verifier/tests/mock.rs @@ -1,11 +1,13 @@ use aggregate_verifier::error::ContractError; +use axelar_wasm_std::VerificationStatus; use connection_router::state::{CrossChainId, Message}; use cosmwasm_schema::cw_serde; use cosmwasm_std::{to_binary, Addr, DepsMut, Env, MessageInfo, Response}; use cw_multi_test::{App, ContractWrapper, Executor}; use cw_storage_plus::Map; -const MOCK_VOTING_VERIFIER_MESSAGES: Map = Map::new("voting_verifier_messages"); +const MOCK_VOTING_VERIFIER_MESSAGES: Map = + Map::new("voting_verifier_messages"); #[cw_serde] pub enum MockVotingVerifierExecuteMsg { @@ -27,15 +29,19 @@ pub fn mock_verifier_execute( let mut res = vec![]; for m in messages { match MOCK_VOTING_VERIFIER_MESSAGES.may_load(deps.storage, m.cc_id.clone())? 
{ - Some(b) => res.push((m.cc_id, b)), - None => res.push((m.cc_id, false)), + Some(status) => res.push((m.cc_id, status)), + None => res.push((m.cc_id, VerificationStatus::None)), } } Ok(Response::new().set_data(to_binary(&res)?)) } MockVotingVerifierExecuteMsg::MessagesVerified { messages } => { for m in messages { - MOCK_VOTING_VERIFIER_MESSAGES.save(deps.storage, m.cc_id, &true)?; + MOCK_VOTING_VERIFIER_MESSAGES.save( + deps.storage, + m.cc_id, + &VerificationStatus::SucceededOnChain, + )?; } Ok(Response::new()) } diff --git a/contracts/aggregate-verifier/tests/test.rs b/contracts/aggregate-verifier/tests/test.rs index 77422f873..d6c1930cb 100644 --- a/contracts/aggregate-verifier/tests/test.rs +++ b/contracts/aggregate-verifier/tests/test.rs @@ -1,5 +1,6 @@ use aggregate_verifier::contract::*; use aggregate_verifier::msg::{ExecuteMsg, InstantiateMsg}; +use axelar_wasm_std::VerificationStatus; use connection_router::state::{CrossChainId, Message, ID_SEPARATOR}; use cosmwasm_std::from_binary; use cosmwasm_std::Addr; @@ -56,7 +57,7 @@ fn verify_messages_empty() { &[], ) .unwrap(); - let ret: Vec<(CrossChainId, bool)> = from_binary(&res.data.unwrap()).unwrap(); + let ret: Vec<(CrossChainId, VerificationStatus)> = from_binary(&res.data.unwrap()).unwrap(); assert_eq!(ret, vec![]); } @@ -92,12 +93,12 @@ fn verify_messages_not_verified() { &[], ) .unwrap(); - let ret: Vec<(CrossChainId, bool)> = from_binary(&res.data.unwrap()).unwrap(); + let ret: Vec<(CrossChainId, VerificationStatus)> = from_binary(&res.data.unwrap()).unwrap(); assert_eq!( ret, msgs.iter() - .map(|msg| (msg.cc_id.clone(), false)) - .collect::>() + .map(|msg| (msg.cc_id.clone(), VerificationStatus::None)) + .collect::>() ); } @@ -135,12 +136,12 @@ fn verify_messages_verified() { &[], ) .unwrap(); - let ret: Vec<(CrossChainId, bool)> = from_binary(&res.data.unwrap()).unwrap(); + let ret: Vec<(CrossChainId, VerificationStatus)> = from_binary(&res.data.unwrap()).unwrap(); assert_eq!( ret, msgs.iter() - .map(|msg| (msg.cc_id.clone(), true)) - .collect::>() + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) + .collect::>() ); } @@ -179,7 +180,7 @@ fn verify_messages_mixed_status() { &[], ) .unwrap(); - let ret: Vec<(CrossChainId, bool)> = from_binary(&res.data.unwrap()).unwrap(); + let ret: Vec<(CrossChainId, VerificationStatus)> = from_binary(&res.data.unwrap()).unwrap(); assert_eq!( ret, msgs.iter() @@ -189,11 +190,11 @@ fn verify_messages_mixed_status() { .find(|verified_msg| *verified_msg == msg) .is_some() { - (msg.cc_id.clone(), true) + (msg.cc_id.clone(), VerificationStatus::SucceededOnChain) } else { - (msg.cc_id.clone(), false) + (msg.cc_id.clone(), VerificationStatus::None) } }) - .collect::>() + .collect::>() ); } diff --git a/contracts/gateway/src/contract/execute.rs b/contracts/gateway/src/contract/execute.rs index 9ab7a1446..250d6857a 100644 --- a/contracts/gateway/src/contract/execute.rs +++ b/contracts/gateway/src/contract/execute.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use axelar_wasm_std::VerificationStatus; use cosmwasm_std::{to_binary, Addr, DepsMut, Response, WasmMsg}; use error_stack::{Result, ResultExt}; use itertools::Itertools; @@ -135,15 +136,19 @@ where ) -> Result<(Vec, Vec), ContractError> { let query_response = self.verifier - .verify(aggregate_verifier::msg::QueryMsg::IsVerified { + .verify(aggregate_verifier::msg::QueryMsg::GetMessagesStatus { messages: msgs.to_vec(), })?; - let is_verified = query_response.into_iter().collect::>(); + let statuses = 
query_response.into_iter().collect::>(); - Ok(msgs - .into_iter() - .partition(|msg| -> bool { is_verified.get(&msg.cc_id).copied().unwrap_or(false) })) + Ok(msgs.into_iter().partition(|msg| { + statuses + .get(&msg.cc_id) + .copied() + .unwrap_or(VerificationStatus::None) + == VerificationStatus::SucceededOnChain + })) } } @@ -169,6 +174,7 @@ mod tests { use crate::contract::query; use crate::error::ContractError; use crate::state; + use axelar_wasm_std::VerificationStatus; use connection_router::state::{CrossChainId, Message, ID_SEPARATOR}; use cosmwasm_std::{Addr, CosmosMsg, SubMsg, WasmMsg}; use error_stack::bail; @@ -181,8 +187,8 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let mut msgs = generate_messages(10); // no messages are verified - let is_verified = HashMap::new(); - let contract = create_contract(msg_store.clone(), is_verified); + let verified = HashMap::new(); + let contract = create_contract(msg_store.clone(), verified); // duplicate some IDs msgs[5..] @@ -200,8 +206,11 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // mark all generated messages as verified - let is_verified = msgs.iter().map(|msg| (msg.cc_id.clone(), true)).collect(); - let contract = create_contract(msg_store.clone(), is_verified); + let verified = msgs + .iter() + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) + .collect(); + let contract = create_contract(msg_store.clone(), verified); // try zero, one, many messages let inputs = vec![vec![], msgs[..1].to_vec(), msgs]; @@ -218,8 +227,8 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // no messages are verified - let is_verified = HashMap::new(); - let contract = create_contract(msg_store.clone(), is_verified); + let verified = HashMap::new(); + let contract = create_contract(msg_store.clone(), verified); // try one and many messages (zero messages are tested in verify_all_verified) let inputs = vec![msgs[..1].to_vec(), msgs]; @@ -242,11 +251,11 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // half of the messages are verified - let is_verified = msgs[..5] + let verified = msgs[..5] .iter() - .map(|msg| (msg.cc_id.clone(), true)) + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) .collect(); - let contract = create_contract(msg_store.clone(), is_verified); + let contract = create_contract(msg_store.clone(), verified); // expect: no error, only the unverified messages get verified let result = contract.verify_messages(msgs.clone()); @@ -264,11 +273,11 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // half of the messages are verified - let is_verified = msgs[..5] + let verified = msgs[..5] .iter() - .map(|msg| (msg.cc_id.clone(), true)) + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) .collect(); - let contract = create_contract(msg_store.clone(), is_verified); + let contract = create_contract(msg_store.clone(), verified); // expect: same response when called multiple times and no messages are stored let result1 = contract.verify_messages(msgs.clone()); @@ -305,8 +314,8 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let mut msgs = generate_messages(10); // no messages are verified - let is_verified = HashMap::new(); - let mut contract = create_contract(msg_store.clone(), is_verified); + let verified = HashMap::new(); 
+ let mut contract = create_contract(msg_store.clone(), verified); // senders are "router" and "not a router" let senders = vec![ @@ -337,8 +346,11 @@ mod tests { let msgs = generate_messages(10); // mark all generated messages as verified - let is_verified = msgs.iter().map(|msg| (msg.cc_id.clone(), true)).collect(); - let mut contract = create_contract(msg_store.clone(), is_verified); + let verified = msgs + .iter() + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) + .collect(); + let mut contract = create_contract(msg_store.clone(), verified); // try one and many messages (zero messages are tested in route_none_verified) let inputs = vec![msgs[..1].to_vec(), msgs]; @@ -367,8 +379,8 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // no messages are verified - let is_verified = HashMap::new(); - let mut contract = create_contract(msg_store.clone(), is_verified); + let verified = HashMap::new(); + let mut contract = create_contract(msg_store.clone(), verified); // try zero, one, many messages let inputs = vec![vec![], msgs[..1].to_vec(), msgs]; @@ -393,11 +405,11 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // half of the messages are verified - let is_verified = msgs[..5] + let verified = msgs[..5] .iter() - .map(|msg| (msg.cc_id.clone(), true)) + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) .collect(); - let mut contract = create_contract(msg_store.clone(), is_verified); + let mut contract = create_contract(msg_store.clone(), verified); // expect: send verified msgs to router when sender is not the router let result = contract.route_messages(Addr::unchecked("not a router"), msgs.clone()); @@ -419,11 +431,11 @@ mod tests { let msg_store = Arc::new(RwLock::new(HashMap::new())); let msgs = generate_messages(10); // half of the messages are verified - let is_verified = msgs[..5] + let verified = msgs[..5] .iter() - .map(|msg| (msg.cc_id.clone(), true)) + .map(|msg| (msg.cc_id.clone(), VerificationStatus::SucceededOnChain)) .collect(); - let mut contract = create_contract(msg_store.clone(), is_verified); + let mut contract = create_contract(msg_store.clone(), verified); let senders = vec![ contract.config.router.clone(), @@ -474,7 +486,7 @@ mod tests { fn create_contract( // the store mock requires a 'static type that can be moved into the closure, so we need to use an Arc<> here msg_store: Arc>>, - is_verified: HashMap, + verified: HashMap, ) -> Contract { let config = state::Config { verifier: Addr::unchecked("verifier"), @@ -493,13 +505,16 @@ mod tests { let mut verifier = query::MockVerifier::new(); verifier.expect_verify().returning(move |msg| match msg { - aggregate_verifier::msg::QueryMsg::IsVerified { messages } => Ok(messages + aggregate_verifier::msg::QueryMsg::GetMessagesStatus { messages } => Ok(messages .into_iter() .map(|msg: Message| { ( msg.cc_id.clone(), // if the msg is not know to the verifier, it is not verified - is_verified.get(&msg.cc_id).copied().unwrap_or(false), + verified + .get(&msg.cc_id) + .copied() + .unwrap_or(VerificationStatus::None), ) }) .collect::>()), diff --git a/contracts/gateway/src/contract/query.rs b/contracts/gateway/src/contract/query.rs index e80b2dbb1..8d99a6cce 100644 --- a/contracts/gateway/src/contract/query.rs +++ b/contracts/gateway/src/contract/query.rs @@ -1,5 +1,6 @@ use crate::error::ContractError; use crate::state::OUTGOING_MESSAGES; +use axelar_wasm_std::VerificationStatus; 
use connection_router::state::CrossChainId; use cosmwasm_std::{to_binary, Addr, Binary, Deps, QuerierWrapper, QueryRequest, WasmQuery}; use error_stack::{Result, ResultExt}; @@ -10,7 +11,7 @@ pub trait Verifier { fn verify( &self, msg: aggregate_verifier::msg::QueryMsg, - ) -> Result, ContractError>; + ) -> Result, ContractError>; } pub struct VerifierApi<'a> { @@ -22,7 +23,7 @@ impl Verifier for VerifierApi<'_> { fn verify( &self, msg: aggregate_verifier::msg::QueryMsg, - ) -> Result, ContractError> { + ) -> Result, ContractError> { self.querier .query(&QueryRequest::Wasm(WasmQuery::Smart { contract_addr: self.address.to_string(), diff --git a/contracts/multisig-prover/src/execute.rs b/contracts/multisig-prover/src/execute.rs index 3fe06fe94..0a2677e13 100644 --- a/contracts/multisig-prover/src/execute.rs +++ b/contracts/multisig-prover/src/execute.rs @@ -5,7 +5,7 @@ use cosmwasm_std::{ use multisig::{key::PublicKey, msg::Signer, worker_set::WorkerSet}; -use axelar_wasm_std::snapshot; +use axelar_wasm_std::{snapshot, VerificationStatus}; use connection_router::state::{ChainName, CrossChainId, Message}; use service_registry::state::Worker; @@ -234,16 +234,16 @@ pub fn confirm_worker_set(deps: DepsMut) -> Result { let worker_set = NEXT_WORKER_SET.load(deps.storage)?; - let query = voting_verifier::msg::QueryMsg::IsWorkerSetVerified { + let query = voting_verifier::msg::QueryMsg::GetWorkerSetStatus { new_operators: make_operators(worker_set.clone(), config.encoder), }; - let is_confirmed: bool = deps.querier.query(&QueryRequest::Wasm(WasmQuery::Smart { + let status: VerificationStatus = deps.querier.query(&QueryRequest::Wasm(WasmQuery::Smart { contract_addr: config.voting_verifier.to_string(), msg: to_binary(&query)?, }))?; - if !is_confirmed { + if status != VerificationStatus::SucceededOnChain { return Err(ContractError::WorkerSetNotConfirmed); } diff --git a/contracts/multisig-prover/src/test/mocks/voting_verifier.rs b/contracts/multisig-prover/src/test/mocks/voting_verifier.rs index ef7627853..85eeced73 100644 --- a/contracts/multisig-prover/src/test/mocks/voting_verifier.rs +++ b/contracts/multisig-prover/src/test/mocks/voting_verifier.rs @@ -1,4 +1,4 @@ -use axelar_wasm_std::{hash::Hash, operators::Operators}; +use axelar_wasm_std::{hash::Hash, operators::Operators, VerificationStatus}; use cosmwasm_schema::cw_serde; use cosmwasm_std::{ to_binary, Addr, Binary, Deps, DepsMut, Env, HexBinary, MessageInfo, Response, StdError, @@ -65,10 +65,12 @@ pub fn confirm_worker_set( pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult { match msg { - QueryMsg::IsWorkerSetVerified { new_operators } => to_binary( + QueryMsg::GetWorkerSetStatus { new_operators } => to_binary( &CONFIRMED_WORKER_SETS .may_load(deps.storage, &new_operators.hash())? 
- .is_some(), + .map_or(VerificationStatus::None, |_| { + VerificationStatus::SucceededOnChain + }), ), _ => unimplemented!(), } diff --git a/contracts/voting-verifier/src/contract.rs b/contracts/voting-verifier/src/contract.rs index 77f1c5007..5d162a7a0 100644 --- a/contracts/voting-verifier/src/contract.rs +++ b/contracts/voting-verifier/src/contract.rs @@ -52,13 +52,15 @@ pub fn execute( #[cfg_attr(not(feature = "library"), entry_point)] pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult { match msg { - QueryMsg::IsVerified { messages } => to_binary(&query::is_verified(deps, &messages)?), - QueryMsg::GetPoll { poll_id: _ } => { todo!() } - QueryMsg::IsWorkerSetVerified { new_operators } => { - to_binary(&query::is_worker_set_verified(deps, &new_operators)?) + + QueryMsg::GetMessagesStatus { messages } => { + to_binary(&query::messages_status(deps, &messages)?) + } + QueryMsg::GetWorkerSetStatus { new_operators } => { + to_binary(&query::worker_set_status(deps, &new_operators)?) } } } diff --git a/contracts/voting-verifier/src/execute.rs b/contracts/voting-verifier/src/execute.rs index f9a16dac6..f88837762 100644 --- a/contracts/voting-verifier/src/execute.rs +++ b/contracts/voting-verifier/src/execute.rs @@ -1,24 +1,27 @@ -use axelar_wasm_std::operators::Operators; use cosmwasm_std::{ to_binary, Deps, DepsMut, Env, MessageInfo, QueryRequest, Response, Storage, WasmMsg, WasmQuery, }; -use axelar_wasm_std::voting::{PollId, Vote}; -use axelar_wasm_std::{nonempty, snapshot, voting::WeightedPoll}; +use axelar_wasm_std::{ + nonempty, + operators::Operators, + snapshot, + voting::{PollId, Vote, WeightedPoll}, + VerificationStatus, +}; + use connection_router::state::{ChainName, Message}; use service_registry::msg::QueryMsg; use service_registry::state::Worker; -use crate::error::ContractError; use crate::events::{ PollEnded, PollMetadata, PollStarted, TxEventConfirmation, Voted, WorkerSetConfirmation, }; use crate::msg::{EndPollResponse, VerifyMessagesResponse}; -use crate::query::{ - is_verified, is_worker_set_verified, msg_verification_status, VerificationStatus, -}; +use crate::query::worker_set_status; use crate::state::{self, Poll, PollContent, POLL_MESSAGES, POLL_WORKER_SETS}; use crate::state::{CONFIG, POLLS, POLL_ID}; +use crate::{error::ContractError, query::message_status}; pub fn verify_worker_set( deps: DepsMut, @@ -26,7 +29,8 @@ pub fn verify_worker_set( message_id: nonempty::String, new_operators: Operators, ) -> Result { - if is_worker_set_verified(deps.as_ref(), &new_operators)? 
{ + let status = worker_set_status(deps.as_ref(), &new_operators)?; + if status.is_confirmed() { return Err(ContractError::WorkerSetAlreadyConfirmed); } @@ -83,22 +87,27 @@ pub fn verify_messages( let config = CONFIG.load(deps.storage)?; - let response = Response::new().set_data(to_binary(&VerifyMessagesResponse { - verification_statuses: is_verified(deps.as_ref(), &messages)?, - })?); - let messages = messages .into_iter() - .map(|message| { - msg_verification_status(deps.as_ref(), &message).map(|status| (status, message)) - }) + .map(|message| message_status(deps.as_ref(), &message).map(|status| (status, message))) .collect::, _>>()?; + let response = Response::new().set_data(to_binary(&VerifyMessagesResponse { + verification_statuses: messages + .iter() + .map(|(status, message)| (message.cc_id.to_owned(), status.to_owned())) + .collect(), + })?); + let msgs_to_verify: Vec = messages .into_iter() .filter_map(|(status, message)| match status { - VerificationStatus::FailedToVerify | VerificationStatus::NotVerified => Some(message), - VerificationStatus::InProgress | VerificationStatus::Verified => None, + VerificationStatus::NotFound + | VerificationStatus::FailedToVerify + | VerificationStatus::None => Some(message), + VerificationStatus::InProgress + | VerificationStatus::SucceededOnChain + | VerificationStatus::FailedOnChain => None, }) .collect(); diff --git a/contracts/voting-verifier/src/msg.rs b/contracts/voting-verifier/src/msg.rs index 47c2f5381..0e617f6f3 100644 --- a/contracts/voting-verifier/src/msg.rs +++ b/contracts/voting-verifier/src/msg.rs @@ -4,7 +4,7 @@ use axelar_wasm_std::{ nonempty, operators::Operators, voting::{PollId, PollState, Vote}, - MajorityThreshold, + MajorityThreshold, VerificationStatus, }; use connection_router::state::{ChainName, CrossChainId, Message}; @@ -61,16 +61,16 @@ pub enum QueryMsg { #[returns(Poll)] GetPoll { poll_id: PollId }, - #[returns(Vec<(connection_router::state::CrossChainId, bool)>)] - IsVerified { messages: Vec }, + #[returns(Vec<(connection_router::state::CrossChainId, VerificationStatus)>)] + GetMessagesStatus { messages: Vec }, - #[returns(bool)] - IsWorkerSetVerified { new_operators: Operators }, + #[returns(VerificationStatus)] + GetWorkerSetStatus { new_operators: Operators }, } #[cw_serde] pub struct VerifyMessagesResponse { - pub verification_statuses: Vec<(CrossChainId, bool)>, + pub verification_statuses: Vec<(CrossChainId, VerificationStatus)>, } #[cw_serde] diff --git a/contracts/voting-verifier/src/query.rs b/contracts/voting-verifier/src/query.rs index 3a4efdb48..5beba7a6f 100644 --- a/contracts/voting-verifier/src/query.rs +++ b/contracts/voting-verifier/src/query.rs @@ -1,58 +1,39 @@ -use axelar_wasm_std::operators::Operators; -use axelar_wasm_std::voting::{PollStatus, Vote}; +use axelar_wasm_std::{ + operators::Operators, + voting::{PollStatus, Vote}, + VerificationStatus, +}; use connection_router::state::{CrossChainId, Message}; -use cosmwasm_schema::cw_serde; use cosmwasm_std::Deps; use crate::error::ContractError; use crate::state::{self, Poll, PollContent, POLLS, POLL_MESSAGES, POLL_WORKER_SETS}; -#[cw_serde] -pub enum VerificationStatus { - Verified, - FailedToVerify, - InProgress, // still in an open poll - NotVerified, // not in a poll -} - -pub fn is_verified( +pub fn messages_status( deps: Deps, messages: &[Message], -) -> Result, ContractError> { +) -> Result, ContractError> { messages .iter() .map(|message| { - msg_verification_status(deps, message).map(|status| { - ( - message.cc_id.to_owned(), - 
matches!(status, VerificationStatus::Verified), - ) - }) + message_status(deps, message).map(|status| (message.cc_id.to_owned(), status)) }) .collect::, _>>() } -pub fn is_worker_set_verified(deps: Deps, operators: &Operators) -> Result { - Ok(matches!( - worker_set_verification_status(deps, operators)?, - VerificationStatus::Verified - )) -} - -pub fn msg_verification_status( - deps: Deps, - message: &Message, -) -> Result { +pub fn message_status(deps: Deps, message: &Message) -> Result { let loaded_poll_content = POLL_MESSAGES.may_load(deps.storage, &message.hash())?; + Ok(verification_status(deps, loaded_poll_content, message)) } -pub fn worker_set_verification_status( +pub fn worker_set_status( deps: Deps, operators: &Operators, ) -> Result { - let poll_content = POLL_WORKER_SETS.may_load(deps.storage, &operators.hash())?; - Ok(verification_status(deps, poll_content, operators)) + let loaded_poll_content = POLL_WORKER_SETS.may_load(deps.storage, &operators.hash())?; + + Ok(verification_status(deps, loaded_poll_content, operators)) } fn verification_status( @@ -69,25 +50,23 @@ fn verification_status( let poll = POLLS .load(deps.storage, stored.poll_id) - .expect("invalid invariant: message poll not found"); - - let verified = match &poll { - Poll::Messages(poll) | Poll::ConfirmWorkerSet(poll) => { - poll.consensus(stored.index_in_poll) - .expect("invalid invariant: message not found in poll") - == Some(Vote::SucceededOnChain) // TODO: consider Vote::FailedOnChain? - } + .expect("invalid invariant: content's poll not found"); + + let consensus = match &poll { + Poll::Messages(poll) | Poll::ConfirmWorkerSet(poll) => poll + .consensus(stored.index_in_poll) + .expect("invalid invariant: message not found in poll"), }; - if verified { - VerificationStatus::Verified - } else if is_finished(&poll) { - VerificationStatus::FailedToVerify - } else { - VerificationStatus::InProgress + match consensus { + Some(Vote::SucceededOnChain) => VerificationStatus::SucceededOnChain, + Some(Vote::FailedOnChain) => VerificationStatus::FailedOnChain, + Some(Vote::NotFound) => VerificationStatus::NotFound, + None if is_finished(&poll) => VerificationStatus::FailedToVerify, + None => VerificationStatus::InProgress, } } - None => VerificationStatus::NotVerified, + None => VerificationStatus::None, } } @@ -136,12 +115,8 @@ mod tests { .unwrap(); assert_eq!( - msg_verification_status(deps.as_ref(), &msg).unwrap(), - VerificationStatus::InProgress - ); - assert_eq!( - vec![(msg.cc_id.clone(), false)], - is_verified(deps.as_ref(), &[msg]).unwrap() + vec![(msg.cc_id.clone(), VerificationStatus::InProgress)], + messages_status(deps.as_ref(), &[msg]).unwrap() ); } @@ -172,12 +147,8 @@ mod tests { .unwrap(); assert_eq!( - msg_verification_status(deps.as_ref(), &msg).unwrap(), - VerificationStatus::Verified - ); - assert_eq!( - vec![(msg.cc_id.clone(), true)], - is_verified(deps.as_ref(), &[msg]).unwrap() + vec![(msg.cc_id.clone(), VerificationStatus::SucceededOnChain)], + messages_status(deps.as_ref(), &[msg]).unwrap() ); } @@ -207,12 +178,8 @@ mod tests { .unwrap(); assert_eq!( - msg_verification_status(deps.as_ref(), &msg).unwrap(), - VerificationStatus::FailedToVerify - ); - assert_eq!( - vec![(msg.cc_id.clone(), false)], - is_verified(deps.as_ref(), &[msg]).unwrap() + vec![(msg.cc_id.clone(), VerificationStatus::FailedToVerify)], + messages_status(deps.as_ref(), &[msg]).unwrap() ); } @@ -222,12 +189,8 @@ mod tests { let msg = message(1); assert_eq!( - msg_verification_status(deps.as_ref(), &msg).unwrap(), - 
VerificationStatus::NotVerified - ); - assert_eq!( - vec![(msg.cc_id.clone(), false)], - is_verified(deps.as_ref(), &[msg]).unwrap() + vec![(msg.cc_id.clone(), VerificationStatus::None)], + messages_status(deps.as_ref(), &[msg]).unwrap() ); } diff --git a/contracts/voting-verifier/tests/tests.rs b/contracts/voting-verifier/tests/tests.rs index 186c23097..2fd4106c2 100644 --- a/contracts/voting-verifier/tests/tests.rs +++ b/contracts/voting-verifier/tests/tests.rs @@ -3,7 +3,7 @@ use cosmwasm_std::{from_binary, Addr, Uint64}; use cw_multi_test::{App, ContractWrapper, Executor}; use axelar_wasm_std::operators::Operators; -use axelar_wasm_std::{nonempty, Threshold}; +use axelar_wasm_std::{nonempty, Threshold, VerificationStatus}; use connection_router::state::{ChainName, CrossChainId, Message, ID_SEPARATOR}; use mock::make_mock_rewards; use service_registry::state::Worker; @@ -147,14 +147,14 @@ fn should_verify_messages_if_not_verified() { id: "id:0".parse().unwrap(), chain: source_chain() }, - false + VerificationStatus::None ), ( CrossChainId { id: "id:1".parse().unwrap(), chain: source_chain() }, - false + VerificationStatus::None ), ] ); @@ -266,6 +266,114 @@ fn should_retry_if_message_not_verified() { assert_eq!(messages.len() as u64, 1); } +#[test] +fn should_retry_if_status_not_final() { + let mut app = App::default(); + + let service_registry_address = make_mock_service_registry(&mut app); + + let contract_address = + initialize_contract(&mut app, service_registry_address.as_ref().parse().unwrap()); + + let workers: Vec = app + .wrap() + .query_wasm_smart( + service_registry_address, + &service_registry::msg::QueryMsg::GetActiveWorkers { + service_name: "service_name".to_string(), + chain_name: source_chain(), + }, + ) + .unwrap(); + + let messages = messages(4); + + let msg_verify = msg::ExecuteMsg::VerifyMessages { + messages: messages.clone(), + }; + let res = app.execute_contract( + Addr::unchecked(SENDER), + contract_address.clone(), + &msg_verify, + &[], + ); + assert!(res.is_ok()); + + workers.iter().enumerate().for_each(|(i, worker)| { + let msg = msg::ExecuteMsg::Vote { + poll_id: 1u64.into(), + votes: vec![ + Vote::SucceededOnChain, + Vote::FailedOnChain, + Vote::NotFound, + if i % 2 == 0 { + Vote::SucceededOnChain + } else { + Vote::FailedOnChain + }, + ], + }; + + let res = app.execute_contract(worker.address.clone(), contract_address.clone(), &msg, &[]); + assert!(res.is_ok()); + }); + + app.update_block(|block| block.height += POLL_BLOCK_EXPIRY); + + let msg = msg::ExecuteMsg::EndPoll { + poll_id: 1u64.into(), + }; + let res = app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]); + assert!(res.is_ok()); + + let query: msg::QueryMsg = msg::QueryMsg::GetMessagesStatus { + messages: messages.clone(), + }; + let res: Result, _> = app + .wrap() + .query_wasm_smart(contract_address.clone(), &query); + assert!(res.is_ok()); + assert_eq!( + res.unwrap(), + vec![ + ( + messages[0].cc_id.clone(), + VerificationStatus::SucceededOnChain + ), + (messages[1].cc_id.clone(), VerificationStatus::FailedOnChain), + (messages[2].cc_id.clone(), VerificationStatus::NotFound), + ( + messages[3].cc_id.clone(), + VerificationStatus::FailedToVerify + ) + ] + ); + + let res = app.execute_contract( + Addr::unchecked(SENDER), + contract_address.clone(), + &msg_verify, + &[], + ); + assert!(res.is_ok()); + + let res: Result, _> = + app.wrap().query_wasm_smart(contract_address, &query); + assert!(res.is_ok()); + assert_eq!( + res.unwrap(), + vec![ + ( + 
messages[0].cc_id.clone(), + VerificationStatus::SucceededOnChain + ), + (messages[1].cc_id.clone(), VerificationStatus::FailedOnChain), + (messages[2].cc_id.clone(), VerificationStatus::InProgress), + (messages[3].cc_id.clone(), VerificationStatus::InProgress) + ] + ); +} + #[test] fn should_query_message_statuses() { let mut app = App::default(); @@ -292,15 +400,15 @@ fn should_query_message_statuses() { reply.verification_statuses, messages .iter() - .map(|message| (message.cc_id.clone(), false)) - .collect::>() + .map(|message| (message.cc_id.clone(), VerificationStatus::None)) + .collect::>() ); - let query = msg::QueryMsg::IsVerified { + let query = msg::QueryMsg::GetMessagesStatus { messages: messages.clone(), }; - let statuses: Vec<(CrossChainId, bool)> = app + let statuses: Vec<(CrossChainId, VerificationStatus)> = app .wrap() .query_wasm_smart(contract_address.clone(), &query) .unwrap(); @@ -309,8 +417,8 @@ fn should_query_message_statuses() { statuses, messages .iter() - .map(|message| (message.cc_id.clone(), false)) - .collect::>() + .map(|message| (message.cc_id.clone(), VerificationStatus::InProgress)) + .collect::>() ); let msg: msg::ExecuteMsg = msg::ExecuteMsg::Vote { @@ -323,7 +431,7 @@ fn should_query_message_statuses() { Vote::NotFound } }) - .collect::>(), + .collect::>(), }; app.execute_contract( @@ -349,7 +457,7 @@ fn should_query_message_statuses() { app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]) .unwrap(); - let statuses: Vec<(CrossChainId, bool)> = app + let statuses: Vec<(CrossChainId, VerificationStatus)> = app .wrap() .query_wasm_smart(contract_address.clone(), &query) .unwrap(); @@ -359,8 +467,15 @@ fn should_query_message_statuses() { messages .iter() .enumerate() - .map(|(i, message)| (message.cc_id.clone(), i % 2 == 0)) - .collect::>() + .map(|(i, message)| ( + message.cc_id.clone(), + if i % 2 == 0 { + VerificationStatus::SucceededOnChain + } else { + VerificationStatus::NotFound + } + )) + .collect::>() ); } @@ -381,12 +496,12 @@ fn should_start_worker_set_confirmation() { let res = app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]); assert!(res.is_ok()); - let query = msg::QueryMsg::IsWorkerSetVerified { + let query = msg::QueryMsg::GetWorkerSetStatus { new_operators: operators, }; - let res: Result = app.wrap().query_wasm_smart(contract_address, &query); + let res: Result = app.wrap().query_wasm_smart(contract_address, &query); assert!(res.is_ok()); - assert_eq!(res.unwrap(), false); + assert_eq!(res.unwrap(), VerificationStatus::InProgress); } #[test] @@ -434,12 +549,12 @@ fn should_confirm_worker_set() { let res = app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]); assert!(res.is_ok()); - let query = msg::QueryMsg::IsWorkerSetVerified { + let query = msg::QueryMsg::GetWorkerSetStatus { new_operators: operators, }; - let res: Result = app.wrap().query_wasm_smart(contract_address, &query); + let res: Result = app.wrap().query_wasm_smart(contract_address, &query); assert!(res.is_ok()); - assert_eq!(res.unwrap(), true); + assert_eq!(res.unwrap(), VerificationStatus::SucceededOnChain); } #[test] @@ -487,12 +602,12 @@ fn should_not_confirm_worker_set() { let res = app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]); assert!(res.is_ok()); - let query = msg::QueryMsg::IsWorkerSetVerified { + let query = msg::QueryMsg::GetWorkerSetStatus { new_operators: operators, }; - let res: Result = 
app.wrap().query_wasm_smart(contract_address, &query); + let res: Result = app.wrap().query_wasm_smart(contract_address, &query); assert!(res.is_ok()); - assert_eq!(res.unwrap(), false); + assert_eq!(res.unwrap(), VerificationStatus::NotFound); } #[test] @@ -540,14 +655,14 @@ fn should_confirm_worker_set_after_failed() { let res = app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]); assert!(res.is_ok()); - let query = msg::QueryMsg::IsWorkerSetVerified { + let query = msg::QueryMsg::GetWorkerSetStatus { new_operators: operators.clone(), }; - let res: Result = app + let res: Result = app .wrap() .query_wasm_smart(contract_address.clone(), &query); assert!(res.is_ok()); - assert_eq!(res.unwrap(), false); + assert_eq!(res.unwrap(), VerificationStatus::NotFound); // try again, and this time vote true let msg = msg::ExecuteMsg::VerifyWorkerSet { @@ -574,12 +689,12 @@ fn should_confirm_worker_set_after_failed() { let res = app.execute_contract(Addr::unchecked(SENDER), contract_address.clone(), &msg, &[]); assert!(res.is_ok()); - let query = msg::QueryMsg::IsWorkerSetVerified { + let query = msg::QueryMsg::GetWorkerSetStatus { new_operators: operators, }; - let res: Result = app.wrap().query_wasm_smart(contract_address, &query); + let res: Result = app.wrap().query_wasm_smart(contract_address, &query); assert!(res.is_ok()); - assert_eq!(res.unwrap(), true); + assert_eq!(res.unwrap(), VerificationStatus::SucceededOnChain); } #[test] diff --git a/packages/axelar-wasm-std/src/lib.rs b/packages/axelar-wasm-std/src/lib.rs index 6cdff67f9..4d3eee689 100644 --- a/packages/axelar-wasm-std/src/lib.rs +++ b/packages/axelar-wasm-std/src/lib.rs @@ -3,6 +3,7 @@ pub use crate::{ fn_ext::FnExt, snapshot::{Participant, Snapshot}, threshold::{MajorityThreshold, Threshold}, + verification::VerificationStatus, }; pub mod counter; @@ -17,4 +18,5 @@ pub mod operators; pub mod snapshot; pub mod threshold; pub mod utils; +pub mod verification; pub mod voting; diff --git a/packages/axelar-wasm-std/src/verification.rs b/packages/axelar-wasm-std/src/verification.rs new file mode 100644 index 000000000..27e77cce1 --- /dev/null +++ b/packages/axelar-wasm-std/src/verification.rs @@ -0,0 +1,36 @@ +use cosmwasm_schema::cw_serde; + +#[cw_serde] +#[derive(Copy)] +pub enum VerificationStatus { + SucceededOnChain, + FailedOnChain, + NotFound, + FailedToVerify, // verification process failed, e.g. no consensus reached + InProgress, // verification in progress + None, // not yet verified, e.g. 
not in a poll +} + +impl VerificationStatus { + pub fn is_confirmed(&self) -> bool { + matches!( + self, + VerificationStatus::SucceededOnChain | VerificationStatus::FailedOnChain + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn verification_status_is_confirmed() { + assert!(VerificationStatus::SucceededOnChain.is_confirmed()); + assert!(VerificationStatus::FailedOnChain.is_confirmed()); + assert!(!VerificationStatus::NotFound.is_confirmed()); + assert!(!VerificationStatus::FailedToVerify.is_confirmed()); + assert!(!VerificationStatus::InProgress.is_confirmed()); + assert!(!VerificationStatus::None.is_confirmed()); + } +} From 35607fee8c66788eed0bd3c93978874603d16f7b Mon Sep 17 00:00:00 2001 From: Christian Gorenflo Date: Thu, 25 Jan 2024 16:18:29 -0500 Subject: [PATCH 08/54] refactor: move finalizer picker into finalizer module (#241) --- ampd/src/evm/finalizer.rs | 16 ++++++++++++++++ ampd/src/evm/mod.rs | 21 --------------------- ampd/src/handlers/evm_verify_msg.rs | 13 ++++++------- ampd/src/handlers/evm_verify_worker_set.rs | 13 ++++++------- 4 files changed, 28 insertions(+), 35 deletions(-) diff --git a/ampd/src/evm/finalizer.rs b/ampd/src/evm/finalizer.rs index d10881b14..ef6ef31c7 100644 --- a/ampd/src/evm/finalizer.rs +++ b/ampd/src/evm/finalizer.rs @@ -1,3 +1,4 @@ +use crate::evm::ChainName; use async_trait::async_trait; use error_stack::{self, Report, ResultExt}; use ethers::types::U64; @@ -14,6 +15,21 @@ pub trait Finalizer: Send + Sync { async fn latest_finalized_block_height(&self) -> Result; } +pub fn pick<'a, C, H>( + chain: &'a ChainName, + rpc_client: &'a C, + confirmation_height: H, +) -> Box +where + C: EthereumClient + Send + Sync, + H: Into, +{ + match chain { + ChainName::Ethereum => Box::new(EthereumFinalizer::new(rpc_client)), + ChainName::Other(_) => Box::new(PoWFinalizer::new(rpc_client, confirmation_height)), + } +} + pub struct EthereumFinalizer<'a, C> where C: EthereumClient, diff --git a/ampd/src/evm/mod.rs b/ampd/src/evm/mod.rs index 3e6800a89..08422f619 100644 --- a/ampd/src/evm/mod.rs +++ b/ampd/src/evm/mod.rs @@ -1,7 +1,6 @@ use std::fmt::Display; use enum_display_derive::Display; -use ethers::types::U64; use serde::{Deserialize, Serialize}; pub mod error; @@ -22,26 +21,6 @@ impl PartialEq for ChainName { } } -impl ChainName { - pub fn finalizer<'a, C, H>( - &'a self, - rpc_client: &'a C, - confirmation_height: H, - ) -> Box - where - C: json_rpc::EthereumClient + Send + Sync, - H: Into, - { - match self { - ChainName::Ethereum => Box::new(finalizer::EthereumFinalizer::new(rpc_client)), - ChainName::Other(_) => Box::new(finalizer::PoWFinalizer::new( - rpc_client, - confirmation_height, - )), - } - } -} - #[cfg(test)] mod tests { use crate::evm; diff --git a/ampd/src/handlers/evm_verify_msg.rs b/ampd/src/handlers/evm_verify_msg.rs index e0d6643a4..219a806cc 100644 --- a/ampd/src/handlers/evm_verify_msg.rs +++ b/ampd/src/handlers/evm_verify_msg.rs @@ -20,7 +20,7 @@ use voting_verifier::msg::ExecuteMsg; use crate::event_processor::EventHandler; use crate::evm::json_rpc::EthereumClient; use crate::evm::verifier::verify_message; -use crate::evm::ChainName; +use crate::evm::{finalizer, ChainName}; use crate::handlers::errors::Error; use crate::handlers::errors::Error::DeserializeEvent; use crate::queue::queued_broadcaster::BroadcasterClient; @@ -96,12 +96,11 @@ where where T: IntoIterator, { - let latest_finalized_block_height = self - .chain - .finalizer(&self.rpc_client, confirmation_height) - .latest_finalized_block_height() - .await 
- .change_context(Error::Finalizer)?; + let latest_finalized_block_height = + finalizer::pick(&self.chain, &self.rpc_client, confirmation_height) + .latest_finalized_block_height() + .await + .change_context(Error::Finalizer)?; Ok(join_all( tx_hashes diff --git a/ampd/src/handlers/evm_verify_worker_set.rs b/ampd/src/handlers/evm_verify_worker_set.rs index 318829ff1..fed1f0e9a 100644 --- a/ampd/src/handlers/evm_verify_worker_set.rs +++ b/ampd/src/handlers/evm_verify_worker_set.rs @@ -18,7 +18,7 @@ use voting_verifier::msg::ExecuteMsg; use crate::event_processor::EventHandler; use crate::evm::verifier::verify_worker_set; -use crate::evm::{json_rpc::EthereumClient, ChainName}; +use crate::evm::{finalizer, json_rpc::EthereumClient, ChainName}; use crate::handlers::errors::Error; use crate::queue::queued_broadcaster::BroadcasterClient; use crate::types::{EVMAddress, Hash, TMAddress, U256}; @@ -93,12 +93,11 @@ where tx_hash: Hash, confirmation_height: u64, ) -> Result> { - let latest_finalized_block_height = self - .chain - .finalizer(&self.rpc_client, confirmation_height) - .latest_finalized_block_height() - .await - .change_context(Error::Finalizer)?; + let latest_finalized_block_height = + finalizer::pick(&self.chain, &self.rpc_client, confirmation_height) + .latest_finalized_block_height() + .await + .change_context(Error::Finalizer)?; let tx_receipt = self .rpc_client .transaction_receipt(tx_hash) From ef961ec8a16092702ac159724a1c8329cd8a0b35 Mon Sep 17 00:00:00 2001 From: Houmaan Chamani Date: Fri, 26 Jan 2024 12:33:31 -0500 Subject: [PATCH 09/54] feat: axe-2989-Handle-Expired-Signing-Skip (#246) * chore: work in progress - removing grace from multisig config * fix: remove grace period from contract.rs, update env block change to reflect test parameters * feat: add skip expire signing session to multisig.rs * fix: remove unused tracing info_span crate * test: add should_not_handle_event_if_session_expired to multisig.rs --- ampd/src/handlers/multisig.rs | 44 ++++++++++++++++++++++++++++++++++- ampd/src/lib.rs | 1 + 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/ampd/src/handlers/multisig.rs b/ampd/src/handlers/multisig.rs index 22a4d9556..2bf4115cd 100644 --- a/ampd/src/handlers/multisig.rs +++ b/ampd/src/handlers/multisig.rs @@ -9,6 +9,7 @@ use error_stack::ResultExt; use hex::encode; use serde::de::Error as DeserializeError; use serde::{Deserialize, Deserializer}; +use tokio::sync::watch::Receiver; use tracing::info; use events::Error::EventTypeMismatch; @@ -34,6 +35,7 @@ struct SigningStartedEvent { pub_keys: HashMap, #[serde(with = "hex")] msg: MessageDigest, + expires_at: u64, } fn deserialize_public_keys<'de, D>( @@ -74,6 +76,7 @@ where multisig: TMAddress, broadcaster: B, signer: SharableEcdsaClient, + latest_block_height: Receiver, } impl Handler @@ -85,12 +88,14 @@ where multisig: TMAddress, broadcaster: B, signer: SharableEcdsaClient, + latest_block_height: Receiver, ) -> Self { Self { worker, multisig, broadcaster, signer, + latest_block_height, } } @@ -132,6 +137,7 @@ where session_id, pub_keys, msg, + expires_at, } = match event.try_into() as error_stack::Result<_, _> { Err(report) if matches!(report.current_context(), EventTypeMismatch(_)) => { return Ok(()); @@ -149,6 +155,15 @@ where "get signing request", ); + let latest_block_height = *self.latest_block_height.borrow(); + if latest_block_height >= expires_at { + info!( + session_id = session_id.to_string(), + "skipping expired signing session" + ); + return Ok(()); + } + match pub_keys.get(&self.worker) { 
Some(pub_key) => { let signature = self @@ -189,6 +204,7 @@ mod test { use rand::rngs::OsRng; use rand::Rng; use tendermint::abci; + use tokio::sync::watch; use multisig::events::Event::SigningStarted; use multisig::key::PublicKey; @@ -266,6 +282,7 @@ mod test { worker: TMAddress, multisig: TMAddress, signer: SharableEcdsaClient, + latest_block_height: u64, ) -> Handler { let mut broadcaster = MockBroadcaster::new(); broadcaster @@ -275,7 +292,9 @@ mod test { let (broadcaster, _) = QueuedBroadcaster::new(broadcaster, Gas::default(), 100, Duration::from_secs(5)); - Handler::new(worker, multisig, broadcaster.client(), signer) + let (tx, rx) = watch::channel(latest_block_height); + + Handler::new(worker, multisig, broadcaster.client(), signer, rx) } #[test] @@ -344,6 +363,7 @@ mod test { rand_account(), rand_account(), SharableEcdsaClient::new(client), + 100u64, ); assert!(handler.handle(&signing_started_event()).await.is_ok()); @@ -360,6 +380,7 @@ mod test { rand_account(), TMAddress::from(MULTISIG_ADDRESS.parse::().unwrap()), SharableEcdsaClient::new(client), + 100u64, ); assert!(handler.handle(&signing_started_event()).await.is_ok()); @@ -379,6 +400,7 @@ mod test { worker, TMAddress::from(MULTISIG_ADDRESS.parse::().unwrap()), SharableEcdsaClient::new(client), + 99u64, ); assert!(matches!( @@ -386,4 +408,24 @@ mod test { Error::Sign )); } + + #[tokio::test] + async fn should_not_handle_event_if_session_expired() { + let mut client = MockEcdsaClient::new(); + client + .expect_sign() + .returning(move |_, _, _| Err(Report::from(tofnd::error::Error::SignFailed))); + + let event = signing_started_event(); + let signing_started: SigningStartedEvent = ((&event).try_into() as Result<_, _>).unwrap(); + let worker = signing_started.pub_keys.keys().next().unwrap().clone(); + let handler = get_handler( + worker, + TMAddress::from(MULTISIG_ADDRESS.parse::().unwrap()), + SharableEcdsaClient::new(client), + 101u64, + ); + + assert!(handler.handle(&signing_started_event()).await.is_ok()); + } } diff --git a/ampd/src/lib.rs b/ampd/src/lib.rs index 5945fa2df..f1bcf9f6b 100644 --- a/ampd/src/lib.rs +++ b/ampd/src/lib.rs @@ -228,6 +228,7 @@ where cosmwasm_contract, self.broadcaster.client(), self.ecdsa_client.clone(), + self.block_height_monitor.latest_block_height(), ), ), handlers::config::Config::SuiMsgVerifier { From d41029f2a911134b4337225f84c2d5bc1ea72deb Mon Sep 17 00:00:00 2001 From: Houmaan Chamani Date: Fri, 26 Jan 2024 16:02:46 -0500 Subject: [PATCH 10/54] docs: update voting_verifier.md (#237) * docs: update voting_verifier.md * docs: remove entity structure code and move description to the top * update: doc/src/contracts/voting_verifier.md Co-authored-by: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> * fix: added link to prover sequence diagram * fix: change link from weblink to relative --------- Co-authored-by: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> --- doc/src/contracts/voting_verifier.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/doc/src/contracts/voting_verifier.md b/doc/src/contracts/voting_verifier.md index a917f767c..2c9200b97 100644 --- a/doc/src/contracts/voting_verifier.md +++ b/doc/src/contracts/voting_verifier.md @@ -1,13 +1,18 @@ # Voting Verifier The voting verifier verifies batches of messages via RPC voting. Polls are created and votes are cast via a generic voting module, -which the voting verifier uses. 
The generic voting module does not know the meaning of the polls, and simply returns a poll -poll ID to the voting verifier. The voting verifier internally maps -a poll ID to the messages in the poll, to be able to call back to +which the voting verifier uses. The generic voting module does not know the meaning of the Polls, and simply returns a +Poll ID to the voting verifier. The voting verifier internally maps +a Poll ID to the messages in the Poll, to be able to call back to the verifier and propagate the result back to the gateway. +There are two types of polls: messages polls and worker set polls. Messages polls are used to verify incoming messages, while worker set polls are used to verify that the external gateway has updated it's stored worker set. Worker set polls are a necessary component of the worker set update flow. See [`update and confirm WorkerSet sequence diagram`](multisig_prover.md) +for more details. + +## Verfier graph + ```mermaid flowchart TD subgraph Axelar @@ -16,11 +21,10 @@ subgraph Axelar V{"Verifier"} R{"Service Registry"} end -OC{"Off-Chain Procceses"} +OC{"Workers"} G--"VerifyMessages([M, M', M''])"-->V V--"VerifyMessages([M, M', M''])"-->Vr -OC --"StartPoll([M, M', M''])"-->Vr Vr--"GetActiveWorkers"-->R OC--"Vote(poll_id, votes)"-->Vr OC--"EndPoll(poll_id)"-->Vr @@ -28,17 +32,21 @@ Vr--"MessagesVerified([M,M',M''])"-->V ``` + + +## Message Verification Sequence Diagram + + ```mermaid sequenceDiagram participant Verifier participant Voting Verifier participant Service Registry -participant OC as Off-Chain Processes +participant OC as Workers Verifier->>Voting Verifier: VerifyMessages([M,M',M'']) -Voting Verifier->>Voting Verifier: StartPoll([M,M',M'']) Voting Verifier->>Service Registry: GetActiveWorkers Service Registry-->>Voting Verifier: list of workers and stake Voting Verifier->>OC: emit event with poll_id and messages From 8b7f76fc3d07617811bcc80fc3c69bdce0e05acc Mon Sep 17 00:00:00 2001 From: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> Date: Fri, 26 Jan 2024 16:39:33 -0500 Subject: [PATCH 11/54] fix(multisig-prover): don't prevent signing due to multiple workerset rotations (#235) * fix(multisig-prover): Don't prevent signing due to multiple rotations * feat(save_next_worker_set): logic to handle multiple error types * refactor(worker_set_cannot_be_updated_again_while_pending_worker_is_not_yet_confirmed): test breakdown and add comments * fix: update create_new_workers_vec to a more functional approach Co-authored-by: Christian Gorenflo * fix: update update_registry_and_construct_proof's input names * fix: remove redundant variable assignment for construct_proof_and_sign * fix: update function name from process_poll to execute_worker_set_poll Co-authored-by: Christian Gorenflo * fix: first test_utils::execute_worker_set_poll comment update and second_wave_of_new_workers inlining * fix: update comments for test_utils::update_registry_and_construct_proof * refactor: remove redundant function call comments, inline worker vector creation * refactor: comment update for first_wave_of_new_workers and first_wave_worker_set * refactor: comment update for first update_registry_and_construct_proof * refactor: move worker set creation into execute_worker_set_poll --------- Co-authored-by: maancham Co-authored-by: Christian Gorenflo --- contracts/multisig-prover/src/execute.rs | 14 ++- integration-tests/tests/test_utils/mod.rs | 82 ++++++++++++++- integration-tests/tests/update_worker_set.rs | 100 +++++++++++++++++++ 3 files changed, 192 insertions(+), 4 
deletions(-) diff --git a/contracts/multisig-prover/src/execute.rs b/contracts/multisig-prover/src/execute.rs index 0a2677e13..ab3290bd0 100644 --- a/contracts/multisig-prover/src/execute.rs +++ b/contracts/multisig-prover/src/execute.rs @@ -39,8 +39,15 @@ pub fn construct_proof( let mut builder = CommandBatchBuilder::new(config.destination_chain_id, config.encoder); if let Some(new_worker_set) = new_worker_set { - save_next_worker_set(deps.storage, &new_worker_set)?; - builder.add_new_worker_set(new_worker_set)?; + match save_next_worker_set(deps.storage, &new_worker_set) { + Ok(()) => { + builder.add_new_worker_set(new_worker_set)?; + } + Err(ContractError::WorkerSetConfirmationInProgress) => {} + Err(other_error) => { + return Err(other_error); + } + } } for msg in messages { @@ -179,7 +186,8 @@ fn save_next_worker_set( return Err(ContractError::WorkerSetConfirmationInProgress); } - Ok(NEXT_WORKER_SET.save(storage, new_worker_set)?) + NEXT_WORKER_SET.save(storage, new_worker_set)?; + Ok(()) } pub fn update_worker_set(deps: DepsMut, env: Env) -> Result { diff --git a/integration-tests/tests/test_utils/mod.rs b/integration-tests/tests/test_utils/mod.rs index 78e3ae72b..f2df62b1c 100644 --- a/integration-tests/tests/test_utils/mod.rs +++ b/integration-tests/tests/test_utils/mod.rs @@ -573,6 +573,86 @@ pub fn workers_to_worker_set(protocol: &mut Protocol, workers: &Vec) -> ) } +pub fn create_new_workers_vec( + chains: Vec, + worker_details: Vec<(String, u32)>, +) -> Vec { + worker_details + .into_iter() + .map(|(name, seed)| Worker { + addr: Addr::unchecked(name), + supported_chains: chains.clone(), + key_pair: generate_key(seed), + }) + .collect() +} + +pub fn update_registry_and_construct_proof( + protocol: &mut Protocol, + new_workers: &Vec, + workers_to_remove: &Vec, + current_workers: &Vec, + chain_multisig_prover_address: &Addr, + min_worker_bond: Uint128, +) -> Uint64 { + // Register new workers + register_workers( + &mut protocol.app, + protocol.service_registry_address.clone(), + protocol.multisig_address.clone(), + protocol.governance_address.clone(), + protocol.genesis_address.clone(), + new_workers, + protocol.service_name.clone(), + min_worker_bond, + ); + + // Deregister old workers + deregister_workers( + &mut protocol.app, + protocol.service_registry_address.clone(), + protocol.governance_address.clone(), + workers_to_remove, + protocol.service_name.clone(), + ); + + // Construct proof and sign + construct_proof_and_sign( + &mut protocol.app, + &chain_multisig_prover_address, + &protocol.multisig_address, + &Vec::::new(), + ¤t_workers, + ) +} + +pub fn execute_worker_set_poll( + protocol: &mut Protocol, + relayer_addr: &Addr, + verifier_address: &Addr, + new_workers: &Vec, +) { + // Create worker set + let new_worker_set = workers_to_worker_set(protocol, new_workers); + + // Create worker set poll + let (poll_id, expiry) = create_worker_set_poll( + &mut protocol.app, + relayer_addr.clone(), + verifier_address.clone(), + new_worker_set.clone(), + ); + + // Vote for the worker set + vote_true_for_worker_set(&mut protocol.app, verifier_address, new_workers, poll_id); + + // Advance to expiration height + advance_at_least_to_height(&mut protocol.app, expiry); + + // End the poll + end_poll(&mut protocol.app, verifier_address, poll_id); +} + #[derive(Clone)] pub struct Chain { pub gateway_address: Addr, @@ -618,7 +698,7 @@ pub fn setup_chain(protocol: &mut Protocol, chain_name: ChainName) -> Chain { signing_threshold: Threshold::try_from((2, 3)).unwrap().try_into().unwrap(), 
service_name: protocol.service_name.to_string(), chain_name: chain_name.to_string(), - worker_set_diff_threshold: 1, + worker_set_diff_threshold: 0, encoder: multisig_prover::encoding::Encoder::Abi, key_type: multisig::key::KeyType::Ecdsa, }, diff --git a/integration-tests/tests/update_worker_set.rs b/integration-tests/tests/update_worker_set.rs index 5ac278db8..34c800ce0 100644 --- a/integration-tests/tests/update_worker_set.rs +++ b/integration-tests/tests/update_worker_set.rs @@ -1,5 +1,6 @@ use connection_router::Message; use cosmwasm_std::Addr; +use cw_multi_test::Executor; use test_utils::Worker; mod test_utils; @@ -225,3 +226,102 @@ fn worker_set_can_be_initialized_and_then_automatically_updated_during_proof_con assert_eq!(new_worker_set, expected_new_worker_set); } + +#[test] +fn worker_set_cannot_be_updated_again_while_pending_worker_is_not_yet_confirmed() { + let chains = vec![ + "Ethereum".to_string().try_into().unwrap(), + "Polygon".to_string().try_into().unwrap(), + ]; + let (mut protocol, ethereum, _, initial_workers, min_worker_bond) = + test_utils::setup_test_case(); + + let simulated_worker_set = test_utils::workers_to_worker_set(&mut protocol, &initial_workers); + + let worker_set = + test_utils::get_worker_set(&mut protocol.app, ðereum.multisig_prover_address); + + assert_eq!(worker_set, simulated_worker_set); + + // creating a new worker set that only consists of two new workers + let first_wave_of_new_workers = test_utils::create_new_workers_vec( + chains.clone(), + vec![("worker3".to_string(), 2), ("worker4".to_string(), 3)], + ); + + let first_wave_worker_set = + test_utils::workers_to_worker_set(&mut protocol, &first_wave_of_new_workers); + + // register the new workers (3 and 4), deregister all old workers, then create proof and get id + let session_id = test_utils::update_registry_and_construct_proof( + &mut protocol, + &first_wave_of_new_workers, + &initial_workers, + &initial_workers, + ðereum.multisig_prover_address, + min_worker_bond, + ); + + let proof = test_utils::get_proof( + &mut protocol.app, + ðereum.multisig_prover_address, + &session_id, + ); + + // proof must be completed + assert!(matches!( + proof.status, + multisig_prover::msg::ProofStatus::Completed { .. } + )); + assert_eq!(proof.message_ids.len(), 0); + + // starting and ending a poll for the first worker set rotation + test_utils::execute_worker_set_poll( + &mut protocol, + &Addr::unchecked("relayer"), + ðereum.voting_verifier_address, + &first_wave_of_new_workers, + ); + + // try to rotate again. 
this should be ignored, because the first rotation is not yet confirmed + let second_wave_of_new_workers = + test_utils::create_new_workers_vec(chains.clone(), vec![("worker5".to_string(), 5)]); + + let second_wave_session_id = test_utils::update_registry_and_construct_proof( + &mut protocol, + &second_wave_of_new_workers, + &first_wave_of_new_workers, + &initial_workers, + ðereum.multisig_prover_address, + min_worker_bond, + ); + + // confirm the first rotation's set of workers + test_utils::confirm_worker_set( + &mut protocol.app, + Addr::unchecked("relayer"), + ethereum.multisig_prover_address.clone(), + ); + + // get the latest worker set, it should be equal to the first wave worker set + let latest_worker_set = + test_utils::get_worker_set(&mut protocol.app, ðereum.multisig_prover_address); + assert_eq!(latest_worker_set, first_wave_worker_set); + + // attempt to confirm the second rotation + test_utils::execute_worker_set_poll( + &mut protocol, + &Addr::unchecked("relayer"), + ðereum.voting_verifier_address, + &second_wave_of_new_workers, + ); + + let response = protocol.app.execute_contract( + Addr::unchecked("relayer"), + ethereum.multisig_prover_address.clone(), + &multisig_prover::msg::ExecuteMsg::ConfirmWorkerSet, + &[], + ); + + assert!(response.is_err()); +} From b9074b5cc62452af319bca4044abcf35c6b8f8c8 Mon Sep 17 00:00:00 2001 From: Talal Ashraf Date: Mon, 29 Jan 2024 10:54:15 -0500 Subject: [PATCH 12/54] chore: add release pipeline (#239) --- .github/actions/release/action.yaml | 164 +++++++++++++++++++ .github/workflows/release.yaml | 74 +++++++++ README.md | 7 + integration-tests/release.toml | 1 + packages/axelar-wasm-std-derive/release.toml | 1 + packages/axelar-wasm-std/release.toml | 1 + packages/connection-router-api/release.toml | 1 + packages/events-derive/release.toml | 1 + packages/events/release.toml | 1 + packages/gateway-api/release.toml | 1 + packages/report/release.toml | 1 + release.toml | 5 + 12 files changed, 258 insertions(+) create mode 100644 .github/actions/release/action.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 integration-tests/release.toml create mode 100644 packages/axelar-wasm-std-derive/release.toml create mode 100644 packages/axelar-wasm-std/release.toml create mode 100644 packages/connection-router-api/release.toml create mode 100644 packages/events-derive/release.toml create mode 100644 packages/events/release.toml create mode 100644 packages/gateway-api/release.toml create mode 100644 packages/report/release.toml create mode 100644 release.toml diff --git a/.github/actions/release/action.yaml b/.github/actions/release/action.yaml new file mode 100644 index 000000000..39bf1355b --- /dev/null +++ b/.github/actions/release/action.yaml @@ -0,0 +1,164 @@ +--- +name: 'Release' +description: 'Wrapper around the Semver library to create releases' +inputs: + binary-to-release: + description: "Name of binary to release" + required: true + default: 'ampd' + dry-run: + description: "When true, just output plan" + required: true + default: 'true' + major-pattern: + description: "major pattern match string" + minor-pattern: + description: "minor pattern match string" + change-path: + description: "paths to observe for changes" + +runs: + using: "composite" + steps: + - name: Print inputs + id: print-inputs + shell: bash + run: | + echo "binary-to-release: + ${{ inputs.binary-to-release }}" + echo "dry-run: + ${{ inputs.dry-run }}" + echo "major-pattern: + ${{ inputs.major-pattern }}" + echo "minor-pattern: + ${{ 
inputs.minor-pattern }}" + echo "change-path: + ${{ inputs.change-path }}" + + - name: Determine next semantic version + id: semantic-version + uses: paulhatch/semantic-version@v5.3.0 + with: + major_pattern: ${{ inputs.major-pattern }} + minor_pattern: ${{ inputs.minor-pattern }} + change_path: ${{ inputs.change-path }} + tag_prefix: ${{ inputs.binary-to-release }}-v + version_from_branch: false + + - name: Print determined semantic version + id: print-semantic-version + shell: bash + run: | + echo "MAJOR: + ${{ steps.semantic-version.outputs.major }}" + echo "MINOR: + ${{ steps.semantic-version.outputs.minor }}" + echo "PATCH: + ${{ steps.semantic-version.outputs.patch }}" + echo "VERSION: + ${{ steps.semantic-version.outputs.version }}" + echo "VERSION-TAG: + ${{ steps.semantic-version.outputs.version_tag }}" + echo "VERSION-TYPE: + ${{ steps.semantic-version.outputs.version_type }}" + echo "IS-TAGGED: + ${{ steps.semantic-version.outputs.is_tagged }}" + echo "CHANGED: + ${{ steps.semantic-version.outputs.changed }}" + echo "PREVIOUS-VERSION: + ${{ steps.semantic-version.outputs.previous_version }}" + + - name: Check if tag already exists (possible collision with an orphaned + commit tagged as patch) + id: validate-tag + shell: bash + if: ${{ steps.semantic-version.outputs.changed == 'true' }} + run: | + if [[ + ! -z "$(git tag -l ${{ steps.semantic-version.outputs.version_tag }})" + ]]; then + cat << EOF + Tag already exists: ${{ steps.semantic-version.outputs.version_tag }} + This means that there is a commit tagged as patch that is not part of + the main branch. Under these circumstances the preferred way to + release is to create a new minor release from the main branch + + However, if you must release a patch, please follow the steps below + + Please check the tags and use the patch commit as the base for the + new release. + + Retreive the latest patch commit from the tag: + git tag --list ${{inputs.binary-to-release}}-v* + + Checkout the tag: + git checkout + + Create a new branch from the commit: + git checkout -b patch/${{inputs.binary-to-release}}/ + + Cherry pick current changes to the new branch: + git cherry-pick ${{ github.sha }} + + Push the new branch: + git push origin + + Create a PR from the new branch to the previous patch tag + hub pull-request -b + + Once the PR is approved, run the release workflow and choose the + branch created above as the base branch. Note that this patch will not + be part of the main branch unless explicitly merged into it. And none + of the commits from the main branch since the last patch will be part + of this release. + EOF + exit 1 + else + echo "Tag is unique. OK to proceed" + fi + + - name: Check major and minor releases are from main branch only + id: validate-branch + shell: bash + if: + steps.semantic-version.outputs.changed == 'true' && + (steps.semantic-version.outputs.version_type == 'major' || + steps.semantic-version.outputs.version_type == 'minor') + run: | + if [[ "${{ github.ref }}" != "refs/heads/main" ]]; then + echo "Major and Minor releases are only allowed from main branch" + exit 1 + else + echo "Release from main branch. 
OK to proceed" + fi + + - name: Install cargo-release + shell: bash + working-directory: ${{ runner.temp }} + run: | + wget -q https://github.com/crate-ci/cargo-release/releases/download/v0.25.4/cargo-release-v0.25.4-x86_64-unknown-linux-gnu.tar.gz + tar -zxf cargo-release-v0.25.4-x86_64-unknown-linux-gnu.tar.gz + mv cargo-release /home/runner/.cargo/bin/cargo-release + + - name: Release cargo crate (dry run) + shell: bash + if: + inputs.dry-run == 'true' && + steps.semantic-version.outputs.changed == 'true' + run: | + cargo release -p ${{ inputs.binary-to-release }} \ + ${{ steps.semantic-version.outputs.version_type }} \ + -v + + - name: Release cargo crate + shell: bash + if: + inputs.dry-run == 'false' && + steps.semantic-version.outputs.changed == 'true' + run: | + git config --global user.email "devops@axelar.network" + git config --global user.name "Axelar DevOps" + cargo release -x \ + --no-confirm \ + -p ${{ inputs.binary-to-release }} \ + ${{ steps.semantic-version.outputs.version_type }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..995f8291f --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,74 @@ +name: Release + +on: + workflow_dispatch: + inputs: + binary-to-release: + description: Binary to release + type: choice + options: + - ampd + - agggregate-verifier + - batching + - connection-router + - gateway + - multisig + - multisig-prover + - nexus-gateway + - rewards + - service-registry + - voting-verifier + dry-run: + description: Dry run + type: boolean + default: true + +jobs: + + release: + name: Release ${{ github.event.inputs.binary-to-release }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: '0' + - name: Setup variables for sub-project to release + id: setup-variables + shell: bash + run: | + binary="${{ github.event.inputs.binary-to-release }}" + declare -A binaries_data=( + ["ampd"]="ampd,major-ampd,minor-ampd," + ["aggregate-verifier"]="aggregate-verifier,/(major-aggregate-verifier)|(major-contracts)/,/(minor-aggregate-verifier)|(minor-contracts)/,contracts/aggregate-verifier packages" + ["batching"]="batching,/(major-batching)|(major-contracts)/,/(minor-batching)|(minor-contracts)/,contracts/batching packages" + ["connection-router"]="connection-router,/(major-connection-router)|(major-contracts)/,/(minor-connection-router)|(minor-contracts)/,contracts/connection-router packages" + ["gateway"]="gateway,/(major-gateway)|(major-contracts)/,/(minor-gateway)|(minor-contracts)/,contracts/gateway packages" + ["multisig"]="multisig,/(major-multisig)|(major-contracts)/,/(minor-multisig)|(minor-contracts)/,contracts/multisig packages" + ["multisig-prover"]="multisig-prover,/(major-multisig-prover)|(major-contracts)/,/(minor-multisig-prover)|(minor-contracts)/,contracts/multisig-prover packages" + ["nexus-gateway"]="nexus-gateway,/(major-nexus-gateway)|(major-contracts)/,/(minor-nexus-gateway)|(minor-contracts)/,contracts/nexus-gateway packages" + ["rewards"]="rewards,/(major-rewards)|(major-contracts)/,/(minor-rewards)|(minor-contracts)/,contracts/rewards packages" + ["service-registry"]="service-registry,/(major-service-registry)|(major-contracts)/,/(minor-service-registry)|(minor-contracts)/,contracts/service-registry packages" + ["voting-verifier"]="voting-verifier,/(major-voting-verifier)|(major-contracts)/,/(minor-voting-verifier)|(minor-contracts)/,contracts/voting-verifier packages" + ) + + if [[ -n "${binaries_data[$binary]}" 
]]; then + IFS=',' read -r binary_to_release major_pattern minor_pattern change_path <<< "${binaries_data[$binary]}" + echo "binary-to-release=$binary_to_release" >> "$GITHUB_OUTPUT" + echo "major-pattern=$major_pattern" >> "$GITHUB_OUTPUT" + echo "minor-pattern=$minor_pattern" >> "$GITHUB_OUTPUT" + echo "change-path=$change_path" >> "$GITHUB_OUTPUT" + else + echo "Unknown binary to release" + exit 1 + fi + + - name: Release ${{ github.event.inputs.binary-to-release }} + uses: ./.github/actions/release + with: + binary-to-release: ${{ steps.setup-variables.outputs.binary-to-release }} + dry-run: ${{ github.event.inputs.dry-run }} + major-pattern: ${{ steps.setup-variables.outputs.major-pattern }} + minor-pattern: ${{ steps.setup-variables.outputs.minor-pattern }} + change-path: ${{ steps.setup-variables.outputs.change-path }} + diff --git a/README.md b/README.md index b9442586c..a070f39cc 100644 --- a/README.md +++ b/README.md @@ -13,3 +13,10 @@ Json schemas for types used in the contract apis can be generated by navigating ### Development and Testing When developing contracts to integrate with amplifier, the `cw-multi-test` crate can be used to create a simulated blockchain environment, where different contracts can deployed and interacted with, and contracts can interact with each other. See the [integration-tests](integration-tests) package for examples, as well as reusable helper functions. + +### Versioning + +The semver for new releases is calculated automatically based on the commit messages and the folders where changes were made. The configuration for each piece of software released (e.g ampd, gateway...) can be seen in the release.yaml file. You can perform a dry-run using the release action to be sure that the next version is what you intend it to be. The basic rules are as follows: + - a commit with a message that does not include any associated tag (e.g major-contracts) for release will be considered a patch release + - a commit with a message with minor tag e.g `feat(minor-ampd):...` will be considered a minor release, and the same logic applies to major releases + - if no changes are detected in the watched directories, the release will not bump the version. e.g if since last release for the batching contract, no changes were made in the `contracts/major-batching` or `packages/` directory. A new release will not bump the version. 
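As a rough illustration of the versioning rules described in the README hunk above (the crate name, commit subjects, and version numbers below are hypothetical; only the patterns, change paths, and `cargo release` flags are taken from the release action and workflow introduced in this patch), cutting a `gateway` release might look like:

```sh
# Hypothetical commits since the last tag (tag prefix "gateway-v", e.g. gateway-v1.2.0):
#   "fix(gateway): handle empty proof"           -> no release tag in message  => patch bump
#   "feat(minor-gateway): add new query"         -> matches the minor pattern  => minor bump
#   "feat(major-contracts): breaking msg change" -> matches the major pattern  => major bump
# Changes only count toward a bump if they touch the watched paths,
# here contracts/gateway or packages/ (the gateway change-path above).

# Preview locally what the workflow's dry-run step would do (nothing is pushed):
cargo release -p gateway minor -v

# The workflow performs the actual release with:
#   cargo release -x --no-confirm -p gateway minor
```

If several patterns match since the last tag, the largest bump wins; if none of the watched paths changed, no new version is tagged, as stated in the README section above.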
diff --git a/integration-tests/release.toml b/integration-tests/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/integration-tests/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/axelar-wasm-std-derive/release.toml b/packages/axelar-wasm-std-derive/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/axelar-wasm-std-derive/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/axelar-wasm-std/release.toml b/packages/axelar-wasm-std/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/axelar-wasm-std/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/connection-router-api/release.toml b/packages/connection-router-api/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/connection-router-api/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/events-derive/release.toml b/packages/events-derive/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/events-derive/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/events/release.toml b/packages/events/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/events/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/gateway-api/release.toml b/packages/gateway-api/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/gateway-api/release.toml @@ -0,0 +1 @@ +release = false diff --git a/packages/report/release.toml b/packages/report/release.toml new file mode 100644 index 000000000..954564097 --- /dev/null +++ b/packages/report/release.toml @@ -0,0 +1 @@ +release = false diff --git a/release.toml b/release.toml new file mode 100644 index 000000000..da7a87a48 --- /dev/null +++ b/release.toml @@ -0,0 +1,5 @@ +pre-release-commit-message = "chore: release {{crate_name}} {{version}} [skip ci]" +consolidate-commits = false +release = true +publish = false +allow-branch = ["main","patch/*/*"] From ad64f6867a6e5b08351bd8a68162b024492ac316 Mon Sep 17 00:00:00 2001 From: Houmaan Chamani Date: Wed, 31 Jan 2024 01:39:53 -0500 Subject: [PATCH 13/54] feat: axe-2791 first step, add denounce chain support to service registry (#247) * feat: add denounce chain support ability to service registry * test: add denounce_chain_support testcase * test: add more unit tests for different paths * test: two more unit tests for unregistered worker and service * refactor: add test comments, refactor declare and denounce * fix: address lint issue --- ampd/src/commands/mod.rs | 6 +- ...n_support.rs => register_chain_support.rs} | 2 +- ampd/src/main.rs | 6 +- contracts/service-registry/src/contract.rs | 31 +- contracts/service-registry/src/msg.rs | 10 +- contracts/service-registry/tests/tests.rs | 918 +++++++++++++++++- integration-tests/tests/test_utils/mod.rs | 2 +- 7 files changed, 954 insertions(+), 21 deletions(-) rename ampd/src/commands/{declare_chain_support.rs => register_chain_support.rs} (95%) diff --git a/ampd/src/commands/mod.rs b/ampd/src/commands/mod.rs index fd230ce0f..5fc4b467c 100644 --- a/ampd/src/commands/mod.rs +++ b/ampd/src/commands/mod.rs @@ -23,7 +23,7 @@ use crate::{tofnd, PREFIX}; pub mod bond_worker; pub mod daemon; -pub mod declare_chain_support; +pub mod register_chain_support; pub mod register_public_key; pub mod worker_address; @@ -33,8 +33,8 @@ pub enum SubCommand { Daemon, /// Bond the worker to the service registry 
contract BondWorker(bond_worker::Args), - /// Declare chain support to the service registry contract - DeclareChainSupport(declare_chain_support::Args), + /// Register chain support to the service registry contract + RegisterChainSupport(register_chain_support::Args), /// Register public key to the multisig contract RegisterPublicKey, /// Query the worker address diff --git a/ampd/src/commands/declare_chain_support.rs b/ampd/src/commands/register_chain_support.rs similarity index 95% rename from ampd/src/commands/declare_chain_support.rs rename to ampd/src/commands/register_chain_support.rs index dbf226378..ff26afe83 100644 --- a/ampd/src/commands/declare_chain_support.rs +++ b/ampd/src/commands/register_chain_support.rs @@ -22,7 +22,7 @@ pub struct Args { pub async fn run(config: Config, state_path: &Path, args: Args) -> Result, Error> { let pub_key = worker_pub_key(state_path, config.tofnd_config.clone()).await?; - let msg = serde_json::to_vec(&ExecuteMsg::DeclareChainSupport { + let msg = serde_json::to_vec(&ExecuteMsg::RegisterChainSupport { service_name: args.service_name.into(), chains: args.chains, }) diff --git a/ampd/src/main.rs b/ampd/src/main.rs index 30ec78d2c..e6a6a9ce7 100644 --- a/ampd/src/main.rs +++ b/ampd/src/main.rs @@ -11,7 +11,7 @@ use tracing::{error, info}; use valuable::Valuable; use ampd::commands::{ - bond_worker, daemon, declare_chain_support, register_public_key, worker_address, SubCommand, + bond_worker, daemon, register_chain_support, register_public_key, worker_address, SubCommand, }; use ampd::config::Config; use ampd::Error; @@ -62,8 +62,8 @@ async fn main() -> ExitCode { }) } Some(SubCommand::BondWorker(args)) => bond_worker::run(cfg, &state_path, args).await, - Some(SubCommand::DeclareChainSupport(args)) => { - declare_chain_support::run(cfg, &state_path, args).await + Some(SubCommand::RegisterChainSupport(args)) => { + register_chain_support::run(cfg, &state_path, args).await } Some(SubCommand::RegisterPublicKey) => register_public_key::run(cfg, &state_path).await, Some(SubCommand::WorkerAddress) => worker_address::run(cfg.tofnd_config, &state_path).await, diff --git a/contracts/service-registry/src/contract.rs b/contracts/service-registry/src/contract.rs index 9fa129e47..4ecd8bbf1 100644 --- a/contracts/service-registry/src/contract.rs +++ b/contracts/service-registry/src/contract.rs @@ -94,10 +94,14 @@ pub fn execute( AuthorizationState::NotAuthorized, ) } - ExecuteMsg::DeclareChainSupport { + ExecuteMsg::RegisterChainSupport { service_name, chains, - } => execute::declare_chains_support(deps, info, service_name, chains), + } => execute::register_chains_support(deps, info, service_name, chains), + ExecuteMsg::DeregisterChainSupport { + service_name, + chains, + } => execute::deregister_chains_support(deps, info, service_name, chains), ExecuteMsg::BondWorker { service_name } => execute::bond_worker(deps, info, service_name), ExecuteMsg::UnbondWorker { service_name } => { execute::unbond_worker(deps, env, info, service_name) @@ -237,7 +241,7 @@ pub mod execute { Ok(Response::new()) } - pub fn declare_chains_support( + pub fn register_chains_support( deps: DepsMut, info: MessageInfo, service_name: String, @@ -258,6 +262,27 @@ pub mod execute { Ok(Response::new()) } + pub fn deregister_chains_support( + deps: DepsMut, + info: MessageInfo, + service_name: String, + chains: Vec, + ) -> Result { + SERVICES + .may_load(deps.storage, &service_name)? + .ok_or(ContractError::ServiceNotFound)?; + + WORKERS + .may_load(deps.storage, (&service_name, &info.sender))? 
+ .ok_or(ContractError::WorkerNotFound)?; + + for chain in chains { + WORKERS_PER_CHAIN.remove(deps.storage, (&service_name, &chain, &info.sender)); + } + + Ok(Response::new()) + } + pub fn unbond_worker( deps: DepsMut, env: Env, diff --git a/contracts/service-registry/src/msg.rs b/contracts/service-registry/src/msg.rs index 9546f7d47..c432ef029 100644 --- a/contracts/service-registry/src/msg.rs +++ b/contracts/service-registry/src/msg.rs @@ -31,11 +31,17 @@ pub enum ExecuteMsg { service_name: String, }, - // Declares support for the specified chains. Called by the worker. - DeclareChainSupport { + // Register support for the specified chains. Called by the worker. + RegisterChainSupport { service_name: String, chains: Vec, }, + // Deregister support for the specified chains. Called by the worker. + DeregisterChainSupport { + service_name: String, + chains: Vec, + }, + // Locks up any funds sent with the message as stake. Called by the worker. BondWorker { service_name: String, diff --git a/contracts/service-registry/tests/tests.rs b/contracts/service-registry/tests/tests.rs index 4a6ac8480..84837d99a 100644 --- a/contracts/service-registry/tests/tests.rs +++ b/contracts/service-registry/tests/tests.rs @@ -203,7 +203,7 @@ fn bond_worker() { } #[test] -fn declare_chain_support() { +fn register_chain_support() { let worker = Addr::unchecked("worker"); let mut app = App::new(|router, _, storage| { router @@ -270,7 +270,7 @@ fn declare_chain_support() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name.clone()], }, @@ -313,6 +313,908 @@ fn declare_chain_support() { assert_eq!(workers, vec![]) } +/// If a bonded and authorized worker deregisters support for a chain they previously registered support for, +/// that worker should no longer be part of the active worker set for that chain +#[test] +fn register_and_deregister_support_for_single_chain() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::AuthorizeWorkers { + workers: vec![worker.clone().into()], + service_name: service_name.into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::BondWorker { + service_name: service_name.into(), + }, + &coins(min_worker_bond.u128(), AXL_DENOMINATION), + ); + 
assert!(res.is_ok()); + + let chain_name = ChainName::from_str("ethereum").unwrap(); + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + // Deregister chain support + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr, + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name, + }, + ) + .unwrap(); + assert_eq!(workers, vec![]); +} + +/// Same setting and goal as register_and_deregister_support_for_single_chain() but for multiple chains. +#[test] +fn register_and_deregister_support_for_multiple_chains() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::AuthorizeWorkers { + workers: vec![worker.clone().into()], + service_name: service_name.into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::BondWorker { + service_name: service_name.into(), + }, + &coins(min_worker_bond.u128(), AXL_DENOMINATION), + ); + assert!(res.is_ok()); + + let chains = vec![ + ChainName::from_str("ethereum").unwrap(), + ChainName::from_str("binance").unwrap(), + ChainName::from_str("avalanche").unwrap(), + ]; + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: chains.clone(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: chains.clone(), + }, + &[], + ); + assert!(res.is_ok()); + + for chain in chains { + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr.clone(), + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name: chain.clone(), + }, + ) + .unwrap(); + assert_eq!(workers, vec![]); + } +} + +/// If a bonded and authorized worker deregisters support for the first chain among multiple chains, +/// they should remain part of the active worker set for all chains except the first 
one. +#[test] +fn register_for_multiple_chains_deregister_for_first_one() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::AuthorizeWorkers { + workers: vec![worker.clone().into()], + service_name: service_name.into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::BondWorker { + service_name: service_name.into(), + }, + &coins(min_worker_bond.u128(), AXL_DENOMINATION), + ); + assert!(res.is_ok()); + + let chains = vec![ + ChainName::from_str("ethereum").unwrap(), + ChainName::from_str("binance").unwrap(), + ChainName::from_str("avalanche").unwrap(), + ]; + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: chains.clone(), + }, + &[], + ); + assert!(res.is_ok()); + + // Deregister only the first chain + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![chains[0].clone()], + }, + &[], + ); + assert!(res.is_ok()); + + // Verify that worker is not associated with the deregistered chain + let deregistered_chain = chains[0].clone(); + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr.clone(), + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name: deregistered_chain.clone(), + }, + ) + .unwrap(); + assert_eq!(workers, vec![]); + + // Verify that worker is still associated with other chains + for chain in chains.iter().skip(1) { + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr.clone(), + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name: chain.clone(), + }, + ) + .unwrap(); + assert_eq!( + workers, + vec![Worker { + address: worker.clone(), + bonding_state: BondingState::Bonded { + amount: min_worker_bond + }, + authorization_state: AuthorizationState::Authorized, + service_name: service_name.into() + }] + ); + } +} + +/// If a bonded and authorized worker registers support for one chain and later deregisters support for another chain, +/// the active worker set for the original chain should remain unaffected by the deregistration. 
+#[test] +fn register_support_for_a_chain_deregister_support_for_another_chain() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::AuthorizeWorkers { + workers: vec![worker.clone().into()], + service_name: service_name.into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::BondWorker { + service_name: service_name.into(), + }, + &coins(min_worker_bond.u128(), AXL_DENOMINATION), + ); + assert!(res.is_ok()); + + let chain_name = ChainName::from_str("ethereum").unwrap(); + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let second_chain_name = ChainName::from_str("avalanche").unwrap(); + // Deregister support for another chain + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![second_chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr.clone(), + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name, + }, + ) + .unwrap(); + assert_eq!( + workers, + vec![Worker { + address: worker, + bonding_state: BondingState::Bonded { + amount: min_worker_bond + }, + authorization_state: AuthorizationState::Authorized, + service_name: service_name.into() + }] + ); +} + +/// If a bonded and authorized worker registers, deregisters, and again registers their support for a single chain, +/// the active worker set of that chain should include the worker. 
+#[test] +fn register_deregister_register_support_for_single_chain() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::AuthorizeWorkers { + workers: vec![worker.clone().into()], + service_name: service_name.into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::BondWorker { + service_name: service_name.into(), + }, + &coins(min_worker_bond.u128(), AXL_DENOMINATION), + ); + assert!(res.is_ok()); + + let chain_name = ChainName::from_str("ethereum").unwrap(); + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + // Second support declaration + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr.clone(), + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name, + }, + ) + .unwrap(); + assert_eq!( + workers, + vec![Worker { + address: worker, + bonding_state: BondingState::Bonded { + amount: min_worker_bond + }, + authorization_state: AuthorizationState::Authorized, + service_name: service_name.into() + }] + ); +} + +/// If a bonded and authorized worker deregisters their support for a chain they have not previously registered +/// support for, the call should be ignored and the active worker set of the chain should be intact. 
+#[test]
+fn deregister_previously_unsupported_single_chain() {
+    let worker = Addr::unchecked("worker");
+    let mut app = App::new(|router, _, storage| {
+        router
+            .bank
+            .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION))
+            .unwrap()
+    });
+    let code = ContractWrapper::new(execute, instantiate, query);
+    let code_id = app.store_code(Box::new(code));
+    let governance = Addr::unchecked("gov");
+
+    let contract_addr = app
+        .instantiate_contract(
+            code_id,
+            Addr::unchecked("anyone"),
+            &InstantiateMsg {
+                governance_account: governance.clone().into(),
+            },
+            &[],
+            "service_registry",
+            None,
+        )
+        .unwrap();
+    let service_name = "validators";
+    let min_worker_bond = Uint128::new(100);
+    let res = app.execute_contract(
+        governance.clone(),
+        contract_addr.clone(),
+        &ExecuteMsg::RegisterService {
+            service_name: service_name.into(),
+            service_contract: Addr::unchecked("nowhere"),
+            min_num_workers: 0,
+            max_num_workers: Some(100),
+            min_worker_bond,
+            bond_denom: AXL_DENOMINATION.into(),
+            unbonding_period_days: 10,
+            description: "Some service".into(),
+        },
+        &[],
+    );
+    assert!(res.is_ok());
+
+    let res = app.execute_contract(
+        governance.clone(),
+        contract_addr.clone(),
+        &ExecuteMsg::AuthorizeWorkers {
+            workers: vec![worker.clone().into()],
+            service_name: service_name.into(),
+        },
+        &[],
+    );
+    assert!(res.is_ok());
+
+    let res = app.execute_contract(
+        worker.clone(),
+        contract_addr.clone(),
+        &ExecuteMsg::BondWorker {
+            service_name: service_name.into(),
+        },
+        &coins(min_worker_bond.u128(), AXL_DENOMINATION),
+    );
+    assert!(res.is_ok());
+
+    let chain_name = ChainName::from_str("ethereum").unwrap();
+    let res = app.execute_contract(
+        worker.clone(),
+        contract_addr.clone(),
+        &ExecuteMsg::DeregisterChainSupport {
+            service_name: service_name.into(),
+            chains: vec![chain_name.clone()],
+        },
+        &[],
+    );
+    assert!(res.is_ok());
+
+    let workers: Vec<Worker> = app
+        .wrap()
+        .query_wasm_smart(
+            contract_addr,
+            &QueryMsg::GetActiveWorkers {
+                service_name: service_name.into(),
+                chain_name: ChainName::from_str("some other chain").unwrap(),
+            },
+        )
+        .unwrap();
+    assert_eq!(workers, vec![])
+}
+
+/// If an unbonded but authorized worker deregisters support for a chain they previously registered support for,
+/// that worker should not be part of the active worker set for that chain. 
+#[test] +fn register_and_deregister_support_for_single_chain_unbonded() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::AuthorizeWorkers { + workers: vec![worker.clone().into()], + service_name: service_name.into(), + }, + &[], + ); + assert!(res.is_ok()); + + let chain_name = ChainName::from_str("ethereum").unwrap(); + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let res = app.execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ); + assert!(res.is_ok()); + + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr, + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name, + }, + ) + .unwrap(); + assert_eq!(workers, vec![]); +} + +/// If a worker that is not part of a service deregisters support for a chain from that specific service, +/// process should return a contract error of type WorkerNotFound. 
+#[test] +fn deregister_from_unregistered_worker_single_chain() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let min_worker_bond = Uint128::new(100); + let res = app.execute_contract( + governance.clone(), + contract_addr.clone(), + &ExecuteMsg::RegisterService { + service_name: service_name.into(), + service_contract: Addr::unchecked("nowhere"), + min_num_workers: 0, + max_num_workers: Some(100), + min_worker_bond, + bond_denom: AXL_DENOMINATION.into(), + unbonding_period_days: 10, + description: "Some service".into(), + }, + &[], + ); + assert!(res.is_ok()); + + let chain_name = ChainName::from_str("ethereum").unwrap(); + let err = app + .execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ) + .unwrap_err(); + + assert_eq!( + err.downcast::() + .unwrap() + .to_string(), + axelar_wasm_std::ContractError::from(ContractError::WorkerNotFound).to_string() + ); + + let workers: Vec = app + .wrap() + .query_wasm_smart( + contract_addr, + &QueryMsg::GetActiveWorkers { + service_name: service_name.into(), + chain_name, + }, + ) + .unwrap(); + assert_eq!(workers, vec![]); +} + +/// If a worker deregisters support for a chain of an unregistered service, +/// process should return a contract error of type ServiceNotFound. 
+#[test] +fn deregister_single_chain_for_nonexistent_service() { + let worker = Addr::unchecked("worker"); + let mut app = App::new(|router, _, storage| { + router + .bank + .init_balance(storage, &worker, coins(100000, AXL_DENOMINATION)) + .unwrap() + }); + let code = ContractWrapper::new(execute, instantiate, query); + let code_id = app.store_code(Box::new(code)); + let governance = Addr::unchecked("gov"); + + let contract_addr = app + .instantiate_contract( + code_id, + Addr::unchecked("anyone"), + &InstantiateMsg { + governance_account: governance.clone().into(), + }, + &[], + "service_registry", + None, + ) + .unwrap(); + let service_name = "validators"; + let chain_name = ChainName::from_str("ethereum").unwrap(); + let err = app + .execute_contract( + worker.clone(), + contract_addr.clone(), + &ExecuteMsg::DeregisterChainSupport { + service_name: service_name.into(), + chains: vec![chain_name.clone()], + }, + &[], + ) + .unwrap_err(); + + assert_eq!( + err.downcast::() + .unwrap() + .to_string(), + axelar_wasm_std::ContractError::from(ContractError::ServiceNotFound).to_string() + ); +} + #[test] fn unbond_worker() { let worker = Addr::unchecked("worker"); @@ -381,7 +1283,7 @@ fn unbond_worker() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name.clone()], }, @@ -533,7 +1435,7 @@ fn bond_but_not_authorized() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name.clone()], }, @@ -623,7 +1525,7 @@ fn bond_but_not_enough() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name.clone()], }, @@ -713,7 +1615,7 @@ fn bond_before_authorize() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name.clone()], }, @@ -812,7 +1714,7 @@ fn unbond_then_rebond() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name.clone()], }, @@ -934,7 +1836,7 @@ fn unbonding_period() { let res = app.execute_contract( worker.clone(), contract_addr.clone(), - &ExecuteMsg::DeclareChainSupport { + &ExecuteMsg::RegisterChainSupport { service_name: service_name.into(), chains: vec![chain_name], }, diff --git a/integration-tests/tests/test_utils/mod.rs b/integration-tests/tests/test_utils/mod.rs index f2df62b1c..a00e8ec5e 100644 --- a/integration-tests/tests/test_utils/mod.rs +++ b/integration-tests/tests/test_utils/mod.rs @@ -430,7 +430,7 @@ pub fn register_workers( let response = app.execute_contract( worker.addr.clone(), service_registry.clone(), - &service_registry::msg::ExecuteMsg::DeclareChainSupport { + &service_registry::msg::ExecuteMsg::RegisterChainSupport { service_name: service_name.to_string(), chains: worker.supported_chains.clone(), }, From d6cf113925992a13e46243d232f0b20dee2509c5 Mon Sep 17 00:00:00 2001 From: Christian Gorenflo Date: Wed, 31 Jan 2024 12:31:29 -0500 Subject: [PATCH 14/54] fix!: event processor stops when one of the handlers fails (#244) --- 
ampd/src/asyncutil/mod.rs | 1 + ampd/src/asyncutil/task.rs | 179 ++++++++++++++ ampd/src/broadcaster.rs | 6 +- ampd/src/commands/daemon.rs | 3 + ampd/src/config.rs | 4 + ampd/src/event_processor.rs | 321 +++++++++++++++----------- ampd/src/event_sub.rs | 52 ++--- ampd/src/lib.rs | 124 +++++----- ampd/src/tests/config_template.toml | 1 + packages/axelar-wasm-std/src/error.rs | 13 ++ packages/axelar-wasm-std/src/lib.rs | 2 +- 11 files changed, 488 insertions(+), 218 deletions(-) create mode 100644 ampd/src/asyncutil/mod.rs create mode 100644 ampd/src/asyncutil/task.rs diff --git a/ampd/src/asyncutil/mod.rs b/ampd/src/asyncutil/mod.rs new file mode 100644 index 000000000..cdafe4ad6 --- /dev/null +++ b/ampd/src/asyncutil/mod.rs @@ -0,0 +1 @@ +pub mod task; diff --git a/ampd/src/asyncutil/task.rs b/ampd/src/asyncutil/task.rs new file mode 100644 index 000000000..77a9b7b5c --- /dev/null +++ b/ampd/src/asyncutil/task.rs @@ -0,0 +1,179 @@ +use axelar_wasm_std::error::extend_err; +use error_stack::{Context, Result, ResultExt}; +use std::future::Future; +use std::pin::Pin; +use thiserror::Error; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; +use tracing::info; + +type PinnedFuture = Pin + Send>>; +/// This type represents an awaitable action that can be cancelled. It abstracts away the necessary boxing and pinning +/// to make it work in async contexts. It can be freely moved around and stored in collections. +pub struct CancellableTask { + run_task: Box PinnedFuture + Send>, +} + +impl CancellableTask { + /// Creates a movable task from an async function or closure. + pub fn create(func: impl FnOnce(CancellationToken) -> Fut + Send + 'static) -> Self + where + Fut: Future + Send + 'static, + { + Self { + run_task: Box::new(move |token: CancellationToken| Box::pin(func(token))), + } + } + + pub async fn run(self, token: CancellationToken) -> T { + (self.run_task)(token).await + } +} + +pub struct TaskGroup +where + E: From + Context, +{ + tasks: Vec>>, +} + +impl TaskGroup +where + E: From + Context, +{ + pub fn new() -> Self { + TaskGroup { tasks: vec![] } + } + + /// The added tasks won't be started until [Self::run] is called + pub fn add_task(mut self, task: CancellableTask>) -> Self { + self.tasks.push(task); + self + } + + /// Runs all tasks concurrently. If one task fails, all others are cancelled and the collection of errors is returned. + /// If a task panics, it still returns an error to the manager, so the parent process can shut down all tasks gracefully. + pub async fn run(self, token: CancellationToken) -> Result<(), E> { + // running tasks and waiting for them is tightly coupled, so they both share a cloned token + let mut running_tasks = start_tasks(self.tasks, token.clone()); + wait_for_completion(&mut running_tasks, &token).await + } +} + +fn start_tasks(tasks: Vec>, token: CancellationToken) -> JoinSet +where + T: Send + 'static, +{ + let mut join_set = JoinSet::new(); + + for task in tasks.into_iter() { + // tasks clean up on their own after the cancellation token is triggered, so we discard the abort handles. 
+ // However, we don't know what tasks will do with their token, so we need to create new child tokens here, + // so each task can act independently + join_set.spawn(task.run(token.child_token())); + } + join_set +} + +async fn wait_for_completion( + running_tasks: &mut JoinSet>, + token: &CancellationToken, +) -> Result<(), E> +where + E: From + Context, +{ + let mut final_result = Ok(()); + let total_task_count = running_tasks.len(); + while let Some(task_result) = running_tasks.join_next().await { + // if one task stops, all others should stop as well, so we cancel the token. + // Any call to this after the first is a no-op, so no need to guard it. + token.cancel(); + info!( + "shutting down sub-tasks ({}/{})", + total_task_count - running_tasks.len(), + total_task_count + ); + + final_result = match task_result.change_context(E::from(TaskError {})) { + Err(err) | Ok(Err(err)) => extend_err(final_result, err), + Ok(_) => final_result, + }; + } + + final_result +} + +#[derive(Error, Debug)] +#[error("task failed")] +pub struct TaskError; + +#[cfg(test)] +mod test { + use crate::asyncutil::task::{CancellableTask, TaskError, TaskGroup}; + use error_stack::report; + use tokio_util::sync::CancellationToken; + + #[tokio::test] + async fn running_no_tasks_returns_no_error() { + let tasks: TaskGroup = TaskGroup::new(); + assert!(tasks.run(CancellationToken::new()).await.is_ok()); + } + + #[tokio::test] + async fn when_one_task_ends_cancel_all_others() { + let waiting_task = |token: CancellationToken| async move { + token.cancelled().await; + Ok(()) + }; + + let tasks: TaskGroup = TaskGroup::new() + .add_task(CancellableTask::create(waiting_task)) + .add_task(CancellableTask::create(waiting_task)) + .add_task(CancellableTask::create(|_| async { Ok(()) })) + .add_task(CancellableTask::create(waiting_task)); + assert!(tasks.run(CancellationToken::new()).await.is_ok()); + } + + #[tokio::test] + async fn collect_all_errors_on_completion() { + let tasks = TaskGroup::new() + .add_task(CancellableTask::create(|token| async move { + token.cancelled().await; + Err(report!(TaskError {})) + })) + .add_task(CancellableTask::create(|token| async move { + token.cancelled().await; + Err(report!(TaskError {})) + })) + .add_task(CancellableTask::create(|_| async { Ok(()) })) + .add_task(CancellableTask::create(|token| async move { + token.cancelled().await; + Err(report!(TaskError {})) + })) + .add_task(CancellableTask::create(|_| async { Ok(()) })) + .add_task(CancellableTask::create(|token| async move { + token.cancelled().await; + Err(report!(TaskError {})) + })); + let result = tasks.run(CancellationToken::new()).await; + let err = result.unwrap_err(); + assert_eq!(err.current_frames().len(), 4); + } + + #[tokio::test] + async fn shutdown_gracefully_on_task_panic() { + let tasks = TaskGroup::new() + .add_task(CancellableTask::create(|_| async { Ok(()) })) + .add_task(CancellableTask::create(|_| async { panic!("panic") })) + .add_task(CancellableTask::create(|_| async { + Err(report!(TaskError {})) + })) + .add_task(CancellableTask::create(|_| async { Ok(()) })) + .add_task(CancellableTask::create(|_| async { + Err(report!(TaskError {})) + })); + let result = tasks.run(CancellationToken::new()).await; + let err = result.unwrap_err(); + assert_eq!(err.current_frames().len(), 3); + } +} diff --git a/ampd/src/broadcaster.rs b/ampd/src/broadcaster.rs index b6ba4926e..33bc8d0d5 100644 --- a/ampd/src/broadcaster.rs +++ b/ampd/src/broadcaster.rs @@ -45,7 +45,7 @@ pub enum Error { #[error("failed to confirm tx 
inclusion in block")] TxConfirmation, #[error("failed to execute tx")] - ExecutionError { response: TxResponse }, + Execution { response: TxResponse }, } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] @@ -250,7 +250,7 @@ fn evaluate_response(response: Result) -> ConfirmationRes .. }) => match response { TxResponse { code: 0, .. } => ConfirmationResult::Success, - _ => ConfirmationResult::Critical(Error::ExecutionError { response }), + _ => ConfirmationResult::Critical(Error::Execution { response }), }, } } @@ -520,7 +520,7 @@ mod tests { .await .unwrap_err() .current_context(), - Error::ExecutionError { + Error::Execution { response: TxResponse { code: 32, .. } } )); diff --git a/ampd/src/commands/daemon.rs b/ampd/src/commands/daemon.rs index 03c0e806d..176022a24 100644 --- a/ampd/src/commands/daemon.rs +++ b/ampd/src/commands/daemon.rs @@ -1,6 +1,7 @@ use std::path::Path; use error_stack::{Report, ResultExt}; +use tracing::info; use crate::config::Config; use crate::state::{flush, load}; @@ -9,6 +10,8 @@ use crate::Error; pub async fn run(config: Config, state_path: &Path) -> Result, Report> { let state = load(state_path).change_context(Error::LoadConfig)?; let (state, execution_result) = crate::run(config, state).await; + + info!("persisting state"); let state_flush_result = flush(&state, state_path).change_context(Error::ReturnState); match (execution_result, state_flush_result) { diff --git a/ampd/src/config.rs b/ampd/src/config.rs index f0feb943f..f76153449 100644 --- a/ampd/src/config.rs +++ b/ampd/src/config.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use std::time::Duration; use crate::broadcaster; use crate::commands::ServiceRegistryConfig; @@ -12,6 +13,8 @@ pub struct Config { pub tm_jsonrpc: Url, pub tm_grpc: Url, pub event_buffer_cap: usize, + #[serde(with = "humantime_serde")] + pub event_stream_timeout: Duration, pub broadcast: broadcaster::Config, #[serde(deserialize_with = "deserialize_handler_configs")] pub handlers: Vec, @@ -28,6 +31,7 @@ impl Default for Config { handlers: vec![], tofnd_config: TofndConfig::default(), event_buffer_cap: 100000, + event_stream_timeout: Duration::from_secs(15), service_registry: ServiceRegistryConfig::default(), } } diff --git a/ampd/src/event_processor.rs b/ampd/src/event_processor.rs index da51e8dcc..a03decb88 100644 --- a/ampd/src/event_processor.rs +++ b/ampd/src/event_processor.rs @@ -1,18 +1,18 @@ -use core::future::Future; -use core::pin::Pin; -use std::vec; +use std::pin::Pin; +use std::time::Duration; use async_trait::async_trait; use error_stack::{Context, Result, ResultExt}; use events::Event; -use futures::{future::try_join_all, StreamExt}; +use futures::StreamExt; use thiserror::Error; +use tokio::time::timeout; use tokio_stream::Stream; use tokio_util::sync::CancellationToken; -use crate::handlers::chain; +use crate::asyncutil::task::TaskError; -type Task = Box> + Send>; +use crate::handlers::chain; #[async_trait] pub trait EventHandler { @@ -30,168 +30,233 @@ pub trait EventHandler { } #[derive(Error, Debug)] -pub enum EventProcessorError { - #[error("event handler failed handling event")] - EventHandlerError, - #[error("event stream error")] - EventStreamError, +pub enum Error { + #[error("handler failed to process event")] + Handler, + #[error("could not consume events from stream")] + EventStream, + #[error("handler stopped prematurely")] + Tasks(#[from] TaskError), } -fn consume_events(event_stream: S, handler: H, token: CancellationToken) -> Task +/// Let the `handler` consume events from the 
`event_stream`. The token is checked for cancellation +/// at the end of each consumed block or when the `event_stream` times out. If the token is cancelled or the +/// `event_stream` is closed, the function returns +pub async fn consume_events( + handler: H, + event_stream: S, + stream_timeout: Duration, + token: CancellationToken, +) -> Result<(), Error> where - H: EventHandler + Send + Sync + 'static, - S: Stream> + Send + 'static, + H: EventHandler, + S: Stream>, E: Context, { - let task = async move { - let mut event_stream = Box::pin(event_stream); - while let Some(res) = event_stream.next().await { - let event = res.change_context(EventProcessorError::EventStreamError)?; - - handler - .handle(&event) - .await - .change_context(EventProcessorError::EventHandlerError)?; - - if matches!(event, Event::BlockEnd(_)) && token.is_cancelled() { - break; - } - } + let mut event_stream = Box::pin(event_stream); + loop { + let stream_status = retrieve_next_event(&mut event_stream, stream_timeout) + .await + .change_context(Error::EventStream)?; - Ok(()) - }; + if let StreamStatus::Active(event) = &stream_status { + handler.handle(event).await.change_context(Error::Handler)?; + } - Box::new(task) + if should_task_stop(stream_status, &token) { + return Ok(()); + } + } } -pub struct EventProcessor { - tasks: Vec>, - token: CancellationToken, +async fn retrieve_next_event( + event_stream: &mut Pin>, + stream_timeout: Duration, +) -> Result +where + S: Stream>, + E: Context, +{ + let status = match timeout(stream_timeout, event_stream.next()).await { + Err(_) => StreamStatus::TimedOut, + Ok(None) => StreamStatus::Closed, + Ok(Some(event)) => StreamStatus::Active(event?), + }; + Ok(status) } -impl EventProcessor { - pub fn new(token: CancellationToken) -> Self { - EventProcessor { - tasks: vec![], - token, +fn should_task_stop(stream_status: StreamStatus, token: &CancellationToken) -> bool { + match stream_status { + StreamStatus::Active(Event::BlockEnd(_)) | StreamStatus::TimedOut + if token.is_cancelled() => + { + true } + StreamStatus::Closed => true, + _ => false, } +} - pub fn add_handler(&mut self, handler: H, event_stream: S) -> &mut Self - where - H: EventHandler + Send + Sync + 'static, - S: Stream> + Send + 'static, - E: Context, - { - self.tasks - .push(consume_events(event_stream, handler, self.token.child_token()).into()); - self - } - - pub async fn run(self) -> Result<(), EventProcessorError> { - let handles = self.tasks.into_iter().map(tokio::spawn); - - try_join_all(handles) - .await - .change_context(EventProcessorError::EventHandlerError)? 
- .into_iter() - .find(Result::is_err) - .unwrap_or(Ok(())) - } +enum StreamStatus { + Active(Event), + Closed, + TimedOut, } #[cfg(test)] mod tests { - use crate::event_processor::{EventHandler, EventProcessor}; + use crate::event_processor::{consume_events, Error, EventHandler}; use async_trait::async_trait; - use error_stack::{Report, Result}; - use futures::TryStreamExt; + use error_stack::{report, Result}; + use futures::stream; use mockall::mock; - use thiserror::Error; - use tokio::{self, sync::broadcast}; - use tokio_stream::wrappers::BroadcastStream; + use std::time::Duration; + + use crate::event_processor; + use events::Event; + use tokio::time::timeout; use tokio_util::sync::CancellationToken; #[tokio::test] - async fn should_handle_events() { - let event_count = 10; - let (tx, rx) = broadcast::channel::(event_count); - let token = CancellationToken::new(); - let mut processor = EventProcessor::new(token.child_token()); + async fn stop_when_stream_closes() { + let events: Vec> = vec![ + Ok(Event::BlockEnd(0_u32.into())), + Ok(Event::BlockEnd(1_u32.into())), + Ok(Event::BlockEnd(3_u32.into())), + ]; let mut handler = MockEventHandler::new(); handler .expect_handle() - .returning(|_| Ok(())) - .times(event_count); + .times(events.len()) + .returning(|_| Ok(())); + + let result_with_timeout = timeout( + Duration::from_secs(1), + consume_events( + handler, + stream::iter(events), + Duration::from_secs(1000), + CancellationToken::new(), + ), + ) + .await; + + assert!(result_with_timeout.is_ok()); + assert!(result_with_timeout.unwrap().is_ok()); + } - tokio::spawn(async move { - for i in 0..event_count { - assert!(tx.send(events::Event::BlockEnd((i as u32).into())).is_ok()); - } - }); + #[tokio::test] + async fn return_error_when_stream_fails() { + let events: Vec> = vec![ + Ok(Event::BlockEnd(0_u32.into())), + Err(report!(Error::EventStream)), + ]; - processor.add_handler(handler, BroadcastStream::new(rx).map_err(Report::from)); - assert!(processor.run().await.is_ok()); + let mut handler = MockEventHandler::new(); + handler.expect_handle().times(1).returning(|_| Ok(())); + + let result_with_timeout = timeout( + Duration::from_secs(1), + consume_events( + handler, + stream::iter(events), + Duration::from_secs(1000), + CancellationToken::new(), + ), + ) + .await; + + assert!(result_with_timeout.is_ok()); + assert!(result_with_timeout.unwrap().is_err()); } #[tokio::test] - async fn should_return_error_if_handler_fails() { - let (tx, rx) = broadcast::channel::(10); - let token = CancellationToken::new(); - let mut processor = EventProcessor::new(token.child_token()); + async fn return_error_when_handler_fails() { + let events: Vec> = vec![ + Ok(Event::BlockEnd(0_u32.into())), + Ok(Event::BlockEnd(1_u32.into())), + ]; let mut handler = MockEventHandler::new(); handler .expect_handle() - .returning(|_| Err(EventHandlerError::Unknown.into())) - .once(); - - tokio::spawn(async move { - assert!(tx.send(events::Event::BlockEnd((10_u32).into())).is_ok()); - }); - - processor.add_handler(handler, BroadcastStream::new(rx).map_err(Report::from)); - assert!(processor.run().await.is_err()); + .times(1) + .returning(|_| Err(report!(EventHandlerError::Failed))); + + let result_with_timeout = timeout( + Duration::from_secs(1), + consume_events( + handler, + stream::iter(events), + Duration::from_secs(1000), + CancellationToken::new(), + ), + ) + .await; + + assert!(result_with_timeout.is_ok()); + assert!(result_with_timeout.unwrap().is_err()); } #[tokio::test] - async fn 
should_support_multiple_types_of_handlers() { - let event_count = 10; - let (tx, rx) = broadcast::channel::(event_count); - let token = CancellationToken::new(); - let mut processor = EventProcessor::new(token.child_token()); - let stream = BroadcastStream::new(rx).map_err(Report::from); - let another_stream = BroadcastStream::new(tx.subscribe()).map_err(Report::from); + async fn react_to_cancellation_at_block_end() { + let events: Vec> = vec![ + Ok(Event::BlockBegin(0_u32.into())), + Ok(Event::BlockBegin(1_u32.into())), + Ok(Event::BlockBegin(2_u32.into())), + Ok(Event::BlockEnd(3_u32.into())), + Ok(Event::BlockBegin(4_u32.into())), + ]; let mut handler = MockEventHandler::new(); - handler - .expect_handle() - .returning(|_| Ok(())) - .times(event_count); + handler.expect_handle().times(4).returning(|_| Ok(())); - let mut another_handler = MockAnotherEventHandler::new(); - another_handler - .expect_handle() - .returning(|_| Ok(())) - .times(event_count); + let token = CancellationToken::new(); + token.cancel(); + + let result_with_timeout = timeout( + Duration::from_secs(1), + consume_events( + handler, + stream::iter(events), + Duration::from_secs(1000), + token, + ), + ) + .await; + + assert!(result_with_timeout.is_ok()); + assert!(result_with_timeout.unwrap().is_ok()); + } - tokio::spawn(async move { - for i in 0..event_count { - assert!(tx.send(events::Event::BlockEnd((i as u32).into())).is_ok()); - } - }); + #[tokio::test] + async fn react_to_cancellation_on_timeout() { + let mut handler = MockEventHandler::new(); + handler.expect_handle().times(0).returning(|_| Ok(())); - processor - .add_handler(handler, stream) - .add_handler(another_handler, another_stream); - assert!(processor.run().await.is_ok()); + let token = CancellationToken::new(); + token.cancel(); + + let result_with_timeout = timeout( + Duration::from_secs(1), + consume_events( + handler, + stream::pending::>(), // never returns any items so it can time out + Duration::from_secs(0), + token, + ), + ) + .await; + + assert!(result_with_timeout.is_ok()); + assert!(result_with_timeout.unwrap().is_ok()); } #[derive(Error, Debug)] pub enum EventHandlerError { - #[error("unknown")] - Unknown, + #[error("failed")] + Failed, } mock! { @@ -201,21 +266,7 @@ mod tests { impl EventHandler for EventHandler { type Err = EventHandlerError; - async fn handle(&self, event: &events::Event) -> Result<(), EventHandlerError>; - } - } - - #[derive(Error, Debug)] - pub enum AnotherEventHandlerError {} - - mock! 
{ - AnotherEventHandler{} - - #[async_trait] - impl EventHandler for AnotherEventHandler { - type Err = AnotherEventHandlerError; - - async fn handle(&self, event: &events::Event) -> Result<(), AnotherEventHandlerError>; + async fn handle(&self, event: &Event) -> Result<(), EventHandlerError>; } } } diff --git a/ampd/src/event_sub.rs b/ampd/src/event_sub.rs index 630539156..13d5f85b7 100644 --- a/ampd/src/event_sub.rs +++ b/ampd/src/event_sub.rs @@ -21,24 +21,22 @@ use events::Event; use crate::tm_client::TmClient; -pub struct EventSub { +pub struct EventPublisher { client: T, start_from: Option, poll_interval: Duration, tx: Sender, - token: CancellationToken, } -impl EventSub { - pub fn new(client: T, capacity: usize, token: CancellationToken) -> Self { +impl EventPublisher { + pub fn new(client: T, capacity: usize) -> Self { let (tx, _) = broadcast::channel::(capacity); - EventSub { + EventPublisher { client, start_from: None, poll_interval: Duration::new(5, 0), tx, - token, } } @@ -53,11 +51,11 @@ impl EventSub { self } - pub fn sub(&mut self) -> impl Stream> { + pub fn subscribe(&mut self) -> impl Stream> { BroadcastStream::new(self.tx.subscribe()).map_err(Report::from) } - pub async fn run(mut self) -> Result<(), EventSubError> { + pub async fn run(mut self, token: CancellationToken) -> Result<(), EventSubError> { let mut curr_block_height = match self.start_from { Some(start_from) => start_from, None => self.latest_block_height().await?, @@ -67,9 +65,9 @@ impl EventSub { loop { select! { _ = interval.tick() => { - curr_block_height = self.process_blocks_from(curr_block_height).await?.increment(); + curr_block_height = self.process_blocks_from(curr_block_height, &token).await?.increment(); }, - _ = self.token.cancelled() => { + _ = token.cancelled() => { info!("event sub exiting"); return Ok(()) @@ -92,6 +90,7 @@ impl EventSub { async fn process_blocks_from( &mut self, from: block::Height, + token: &CancellationToken, ) -> Result { let mut height = from; let to = self.latest_block_height().await?; @@ -101,7 +100,7 @@ impl EventSub { .attach_printable(format!("{{ block_height = {height} }}")) .await?; - if self.token.is_cancelled() { + if token.is_cancelled() { return Ok(height); } @@ -188,7 +187,7 @@ mod tests { use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; - use crate::event_sub::{skip_to_block, Event, EventSub}; + use crate::event_sub::{skip_to_block, Event, EventPublisher}; use crate::tm_client; #[test] @@ -255,11 +254,12 @@ mod tests { }); let token = CancellationToken::new(); - let event_sub = EventSub::new(mock_client, 2 * block_count as usize, token.child_token()); - let mut client = event_sub.start_from(from_height); - let mut stream = client.sub(); + let event_publisher = EventPublisher::new(mock_client, 2 * block_count as usize); + let mut client = event_publisher.start_from(from_height); + let mut stream = client.subscribe(); - let handle = tokio::spawn(async move { client.run().await }); + let child_token = token.child_token(); + let handle = tokio::spawn(async move { client.run(child_token).await }); for height in from_height.value()..to_height.value() { let event = stream.next().await; @@ -313,10 +313,11 @@ mod tests { }); let token = CancellationToken::new(); - let mut event_sub = EventSub::new(mock_client, 10, token.child_token()); - let mut stream = event_sub.sub(); + let mut event_publisher = EventPublisher::new(mock_client, 10); + let mut stream = event_publisher.subscribe(); - let handle = tokio::spawn(async move { 
event_sub.run().await }); + let child_token = token.child_token(); + let handle = tokio::spawn(async move { event_publisher.run(child_token).await }); let event = stream.next().await; assert_eq!(event.unwrap().unwrap(), Event::BlockBegin(height)); @@ -399,17 +400,14 @@ mod tests { }); let token = CancellationToken::new(); - let event_sub = EventSub::new( - mock_client, - block_count * event_count_per_block, - token.child_token(), - ); - let mut client = event_sub + let event_publisher = EventPublisher::new(mock_client, block_count * event_count_per_block); + let mut client = event_publisher .start_from(block_height) .poll_interval(Duration::new(0, 1e8 as u32)); - let mut stream = client.sub(); + let mut stream = client.subscribe(); - let handle = tokio::spawn(async move { client.run().await }); + let child_token = token.child_token(); + let handle = tokio::spawn(async move { client.run(child_token).await }); for i in 1..(block_count * event_count_per_block + 1) { let event = stream.next().await; diff --git a/ampd/src/lib.rs b/ampd/src/lib.rs index f1bcf9f6b..e4e8f3224 100644 --- a/ampd/src/lib.rs +++ b/ampd/src/lib.rs @@ -1,20 +1,21 @@ use std::pin::Pin; +use std::time::Duration; use block_height_monitor::BlockHeightMonitor; use cosmos_sdk_proto::cosmos::{ auth::v1beta1::query_client::QueryClient, tx::v1beta1::service_client::ServiceClient, }; -use error_stack::{FutureExt, Result, ResultExt}; +use error_stack::{report, FutureExt, Result, ResultExt}; use thiserror::Error; use tokio::signal::unix::{signal, SignalKind}; use tokio::sync::oneshot; -use tokio::task::JoinSet; use tokio_stream::Stream; use tokio_util::sync::CancellationToken; use tracing::info; +use crate::asyncutil::task::{CancellableTask, TaskError, TaskGroup}; use broadcaster::{accounts::account, Broadcaster}; -use event_processor::{EventHandler, EventProcessor}; +use event_processor::EventHandler; use events::Event; use queue::queued_broadcaster::{QueuedBroadcaster, QueuedBroadcasterDriver}; use state::StateUpdater; @@ -24,6 +25,7 @@ use types::TMAddress; use crate::config::Config; use crate::state::State; +mod asyncutil; mod block_height_monitor; mod broadcaster; pub mod commands; @@ -63,6 +65,7 @@ async fn prepare_app(cfg: Config, state: State) -> Result, handlers, tofnd_config, event_buffer_cap, + event_stream_timeout, service_registry: _service_registry, } = cfg; @@ -124,15 +127,15 @@ async fn prepare_app(cfg: Config, state: State) -> Result, event_buffer_cap, block_height_monitor, ) - .configure_handlers(worker, handlers) + .configure_handlers(worker, handlers, event_stream_timeout) } struct App where T: Broadcaster, { - event_sub: event_sub::EventSub, - event_processor: EventProcessor, + event_publisher: event_sub::EventPublisher, + event_processor: TaskGroup, broadcaster: QueuedBroadcaster, #[allow(dead_code)] broadcaster_driver: QueuedBroadcasterDriver, @@ -157,13 +160,13 @@ where ) -> Self { let token = CancellationToken::new(); - let event_sub = event_sub::EventSub::new(tm_client, event_buffer_cap, token.child_token()); - let event_sub = match state_updater.state().min_handler_block_height() { - Some(min_height) => event_sub.start_from(min_height.increment()), - None => event_sub, + let event_publisher = event_sub::EventPublisher::new(tm_client, event_buffer_cap); + let event_publisher = match state_updater.state().min_handler_block_height() { + Some(min_height) => event_publisher.start_from(min_height.increment()), + None => event_publisher, }; - let event_processor = EventProcessor::new(token.child_token()); + let 
event_processor = TaskGroup::new(); let (broadcaster, broadcaster_driver) = QueuedBroadcaster::new( broadcaster, broadcast_cfg.batch_gas_limit, @@ -172,7 +175,7 @@ where ); Self { - event_sub, + event_publisher, event_processor, broadcaster, broadcaster_driver, @@ -187,13 +190,14 @@ where mut self, worker: TMAddress, handler_configs: Vec, + stream_timeout: Duration, ) -> Result, Error> { for config in handler_configs { - match config { + let task = match config { handlers::config::Config::EvmMsgVerifier { chain, cosmwasm_contract, - } => self.configure_handler( + } => self.create_handler_task( format!("{}-msg-verifier", chain.name), handlers::evm_verify_msg::Handler::new( worker.clone(), @@ -204,11 +208,12 @@ where self.broadcaster.client(), self.block_height_monitor.latest_block_height(), ), + stream_timeout, ), handlers::config::Config::EvmWorkerSetVerifier { chain, cosmwasm_contract, - } => self.configure_handler( + } => self.create_handler_task( format!("{}-worker-set-verifier", chain.name), handlers::evm_verify_worker_set::Handler::new( worker.clone(), @@ -219,9 +224,10 @@ where self.broadcaster.client(), self.block_height_monitor.latest_block_height(), ), + stream_timeout, ), handlers::config::Config::MultisigSigner { cosmwasm_contract } => self - .configure_handler( + .create_handler_task( "multisig-signer", handlers::multisig::Handler::new( worker.clone(), @@ -230,11 +236,12 @@ where self.ecdsa_client.clone(), self.block_height_monitor.latest_block_height(), ), + stream_timeout, ), handlers::config::Config::SuiMsgVerifier { cosmwasm_contract, rpc_url, - } => self.configure_handler( + } => self.create_handler_task( "sui-msg-verifier", handlers::sui_verify_msg::Handler::new( worker.clone(), @@ -243,11 +250,12 @@ where self.broadcaster.client(), self.block_height_monitor.latest_block_height(), ), + stream_timeout, ), handlers::config::Config::SuiWorkerSetVerifier { cosmwasm_contract, rpc_url, - } => self.configure_handler( + } => self.create_handler_task( "sui-worker-set-verifier", handlers::sui_verify_worker_set::Handler::new( worker.clone(), @@ -256,14 +264,21 @@ where self.broadcaster.client(), self.block_height_monitor.latest_block_height(), ), + stream_timeout, ), - } + }; + self.event_processor = self.event_processor.add_task(task); } Ok(self) } - fn configure_handler(&mut self, label: L, handler: H) + fn create_handler_task( + &mut self, + label: L, + handler: H, + stream_timeout: Duration, + ) -> CancellableTask> where L: AsRef, H: EventHandler + Send + Sync + 'static, @@ -276,18 +291,21 @@ where .state() .handler_block_height(label.as_ref()) { - None => Box::pin(self.event_sub.sub()), + None => Box::pin(self.event_publisher.subscribe()), Some(&completed_height) => Box::pin(event_sub::skip_to_block( - self.event_sub.sub(), + self.event_publisher.subscribe(), completed_height.increment(), )), }; - self.event_processor.add_handler(handler, sub); + + CancellableTask::create(move |token| { + event_processor::consume_events(handler, sub, stream_timeout, token) + }) } async fn run(self) -> (State, Result<(), Error>) { let Self { - event_sub, + event_publisher, event_processor, broadcaster, state_updater, @@ -312,33 +330,35 @@ where }); let (state_tx, mut state_rx) = oneshot::channel::(); - let mut set = JoinSet::new(); - set.spawn(event_sub.run().change_context(Error::EventSub)); - set.spawn(event_processor.run().change_context(Error::EventProcessor)); - set.spawn(broadcaster.run().change_context(Error::Broadcaster)); - set.spawn( - block_height_monitor - .run(token.clone()) - 
.change_context(Error::BlockHeightMonitor), - ); - set.spawn(async move { - // assert: the app must wait for this task to exit before trying to receive the state - state_tx - .send(state_updater.run().await) - .expect("the state receiver should still be alive"); - Ok(()) - }); - let execution_result = match (set.join_next().await, token.is_cancelled()) { - (Some(result), false) => { - token.cancel(); - result.unwrap_or_else(|err| Err(err).change_context(Error::Task)) - } - (Some(_), true) => Ok(()), - (None, _) => panic!("all tasks exited unexpectedly"), - }; + let execution_result = TaskGroup::new() + .add_task(CancellableTask::create(|token| { + block_height_monitor + .run(token) + .change_context(Error::BlockHeightMonitor) + })) + .add_task(CancellableTask::create(|token| { + event_publisher + .run(token) + .change_context(Error::EventPublisher) + })) + .add_task(CancellableTask::create(|token| { + event_processor + .run(token) + .change_context(Error::EventProcessor) + })) + .add_task(CancellableTask::create(|_| { + broadcaster.run().change_context(Error::Broadcaster) + })) + .add_task(CancellableTask::create(|_| async move { + // assert: the state updater only stops when all handlers that are updating their states have stopped + state_tx + .send(state_updater.run().await) + .map_err(|_| report!(Error::ReturnState)) + })) + .run(token) + .await; - while (set.join_next().await).is_some() {} // assert: all tasks have exited, it is safe to receive the state let state = state_rx .try_recv() @@ -350,8 +370,8 @@ where #[derive(Error, Debug)] pub enum Error { - #[error("event sub failed")] - EventSub, + #[error("event publisher failed")] + EventPublisher, #[error("event processor failed")] EventProcessor, #[error("broadcaster failed")] @@ -361,7 +381,7 @@ pub enum Error { #[error("connection failed")] Connection, #[error("task execution failed")] - Task, + Task(#[from] TaskError), #[error("failed to return updated state")] ReturnState, #[error("failed to load config")] diff --git a/ampd/src/tests/config_template.toml b/ampd/src/tests/config_template.toml index a80d25f3f..67c0e8de0 100644 --- a/ampd/src/tests/config_template.toml +++ b/ampd/src/tests/config_template.toml @@ -1,6 +1,7 @@ tm_jsonrpc = 'http://localhost:26657/' tm_grpc = 'tcp://localhost:9090' event_buffer_cap = 100000 +event_stream_timeout = '15s' [broadcast] chain_id = 'axelar-dojo-1' diff --git a/packages/axelar-wasm-std/src/error.rs b/packages/axelar-wasm-std/src/error.rs index fc48a2236..88a9b19db 100644 --- a/packages/axelar-wasm-std/src/error.rs +++ b/packages/axelar-wasm-std/src/error.rs @@ -24,3 +24,16 @@ where ContractError::Structured(LoggableError::from(&report)) } } + +/// Merges two error reports into one. If the result is Ok, the added error is returned. 
diff --git a/packages/axelar-wasm-std/src/error.rs b/packages/axelar-wasm-std/src/error.rs
index fc48a2236..88a9b19db 100644
--- a/packages/axelar-wasm-std/src/error.rs
+++ b/packages/axelar-wasm-std/src/error.rs
@@ -24,3 +24,16 @@ where
         ContractError::Structured(LoggableError::from(&report))
     }
 }
+
+/// Merges two error reports into one. If the result is Ok, the added error is returned.
+pub fn extend_err<T, E>(
+    result: error_stack::Result<T, E>,
+    added_error: Report<E>,
+) -> error_stack::Result<T, E> {
+    if let Err(mut base_err) = result {
+        base_err.extend_one(added_error);
+        Err(base_err)
+    } else {
+        Err(added_error)
+    }
+}
diff --git a/packages/axelar-wasm-std/src/lib.rs b/packages/axelar-wasm-std/src/lib.rs
index 4d3eee689..d6fb35755 100644
--- a/packages/axelar-wasm-std/src/lib.rs
+++ b/packages/axelar-wasm-std/src/lib.rs
@@ -7,7 +7,7 @@ pub use crate::{
 };
 
 pub mod counter;
-mod error;
+pub mod error;
 pub mod event;
 pub mod flagset;
 mod fn_ext;

From dc60c6e19c8025f9bd24a514b36a089ba75cef5e Mon Sep 17 00:00:00 2001
From: Houmaan Chamani
Date: Wed, 31 Jan 2024 15:28:49 -0500
Subject: [PATCH 15/54] feat: add support deregistration to ampd src (#248)

---
 ampd/src/commands/deregister_chain_support.rs | 46 +++++++++++++++++++
 ampd/src/commands/mod.rs                      |  3 ++
 ampd/src/commands/register_chain_support.rs   |  4 +-
 ampd/src/main.rs                              |  6 ++-
 4 files changed, 56 insertions(+), 3 deletions(-)
 create mode 100644 ampd/src/commands/deregister_chain_support.rs

diff --git a/ampd/src/commands/deregister_chain_support.rs b/ampd/src/commands/deregister_chain_support.rs
new file mode 100644
index 000000000..8b7db779b
--- /dev/null
+++ b/ampd/src/commands/deregister_chain_support.rs
@@ -0,0 +1,46 @@
+use std::path::Path;
+
+use axelar_wasm_std::nonempty;
+use connection_router::state::ChainName;
+use cosmrs::cosmwasm::MsgExecuteContract;
+use cosmrs::tx::Msg;
+use error_stack::Result;
+use report::ResultCompatExt;
+use service_registry::msg::ExecuteMsg;
+use valuable::Valuable;
+
+use crate::commands::{broadcast_tx, worker_pub_key};
+use crate::config::Config;
+use crate::{Error, PREFIX};
+
+#[derive(clap::Args, Debug, Valuable)]
+pub struct Args {
+    pub service_name: nonempty::String,
+    pub chains: Vec<ChainName>,
+}
+
+pub async fn run(config: Config, state_path: &Path, args: Args) -> Result<Option<String>, Error> {
+    let pub_key = worker_pub_key(state_path, config.tofnd_config.clone()).await?;
+
+    let msg = serde_json::to_vec(&ExecuteMsg::DeregisterChainSupport {
+        service_name: args.service_name.into(),
+        chains: args.chains,
+    })
+    .expect("deregister chain support msg should serialize");
+
+    let tx = MsgExecuteContract {
+        sender: pub_key.account_id(PREFIX).change_context(Error::Tofnd)?,
+        contract: config.service_registry.cosmwasm_contract.as_ref().clone(),
+        msg,
+        funds: vec![],
+    }
+    .into_any()
+    .expect("failed to serialize proto message");
+
+    let tx_hash = broadcast_tx(config, tx, pub_key).await?.txhash;
+
+    Ok(Some(format!(
+        "successfully broadcast deregister chain support transaction, tx hash: {}",
+        tx_hash
+    )))
+}
diff --git a/ampd/src/commands/mod.rs b/ampd/src/commands/mod.rs
index 5fc4b467c..e0bbb73e8 100644
--- a/ampd/src/commands/mod.rs
+++ b/ampd/src/commands/mod.rs
@@ -23,6 +23,7 @@ use crate::{tofnd, PREFIX};
 
 pub mod bond_worker;
 pub mod daemon;
+pub mod deregister_chain_support;
 pub mod register_chain_support;
 pub mod register_public_key;
 pub mod worker_address;
@@ -35,6 +36,8 @@ pub enum SubCommand {
     BondWorker(bond_worker::Args),
     /// Register chain support to the service registry contract
     RegisterChainSupport(register_chain_support::Args),
+    /// Deregister chain support to the service registry contract
+    DeregisterChainSupport(deregister_chain_support::Args),
     /// Register public key to the multisig contract
     RegisterPublicKey,
     /// Query the worker address
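The new subcommand broadcasts a `MsgExecuteContract` whose payload is the JSON-serialized `ExecuteMsg::DeregisterChainSupport` variant shown above. As a standalone illustration of what that payload looks like, here is a small sketch with a stand-in enum (the real `service_registry::msg::ExecuteMsg` is defined outside this patch; the snake_case serde attributes are an assumption based on the usual cosmwasm conventions):

```rust
use serde::Serialize;

// Stand-in for service_registry::msg::ExecuteMsg, only to show the wire shape;
// the real type and its serde attributes live in the service registry crate.
#[derive(Serialize)]
#[serde(rename_all = "snake_case")]
enum ExecuteMsg {
    DeregisterChainSupport {
        service_name: String,
        chains: Vec<String>,
    },
}

fn main() {
    let msg = ExecuteMsg::DeregisterChainSupport {
        service_name: "validators".into(),
        chains: vec!["ethereum".into(), "avalanche".into()],
    };

    // prints:
    // {"deregister_chain_support":{"service_name":"validators","chains":["ethereum","avalanche"]}}
    println!("{}", serde_json::to_string(&msg).expect("serialization should not fail"));
}
```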
diff --git a/ampd/src/commands/register_chain_support.rs b/ampd/src/commands/register_chain_support.rs
index ff26afe83..563c6e1ce 100644
--- a/ampd/src/commands/register_chain_support.rs
+++ b/ampd/src/commands/register_chain_support.rs
@@ -26,7 +26,7 @@ pub async fn run(config: Config, state_path: &Path, args: Args) -> Result