From 9cc176c31d1843263b5e4128f52da4cdc5562db2 Mon Sep 17 00:00:00 2001 From: Song Zhou Date: Wed, 19 Jan 2022 14:59:14 +0000 Subject: [PATCH] init port `near-store` --- .github/workflows/check.yml | 10 +- Cargo.toml | 2 + .../src/skw-vm-primitives/src/errors.rs | 24 + mock-enclave/src/skw-vm-runtime/src/ext.rs | 226 +-- mock-enclave/src/skw-vm-runtime/src/lib.rs | 390 ++--- .../src/skw-vm-runtime/src/verifier.rs | 2 +- mock-enclave/src/skw-vm-store/Cargo.lock | 1365 +++++++++++++++-- mock-enclave/src/skw-vm-store/Cargo.toml | 62 +- mock-enclave/src/skw-vm-store/README.md | 3 - .../src/skw-vm-store/benches/store_bench.rs | 100 ++ .../src/skw-vm-store/benches/trie_bench.rs | 53 + mock-enclave/src/skw-vm-store/src/db.rs | 966 ++++++++++++ .../src/skw-vm-store/src/db/refcount.rs | 138 ++ .../src/skw-vm-store/src/db/v6_to_v7.rs | 75 + mock-enclave/src/skw-vm-store/src/errors.rs | 0 mock-enclave/src/skw-vm-store/src/lib.rs | 543 ++++++- .../src/skw-vm-store/src/migrations.rs | 840 ++++++++++ .../skw-vm-store/src/migrations/v6_to_v7.rs | 126 ++ .../skw-vm-store/src/migrations/v8_to_v9.rs | 80 + .../src/skw-vm-store/src/test_utils.rs | 145 ++ .../skw-vm-store/src/trie/insert_delete.rs | 651 ++++++++ .../src/skw-vm-store/src/trie/iterator.rs | 480 ++++++ mock-enclave/src/skw-vm-store/src/trie/mod.rs | 1175 ++++++++++++++ .../src/skw-vm-store/src/trie/nibble_slice.rs | 355 +++++ .../src/skw-vm-store/src/trie/shard_tries.rs | 412 +++++ .../src/skw-vm-store/src/trie/split_state.rs | 813 ++++++++++ .../src/skw-vm-store/src/trie/state_parts.rs | 726 +++++++++ .../src/skw-vm-store/src/trie/trie_storage.rs | 210 +++ .../src/skw-vm-store/src/trie/trie_tests.rs | 128 ++ .../src/skw-vm-store/src/trie/update.rs | 511 ++++++ .../lockable-fungible-token/Cargo.lock | 26 +- .../examples/status-message/Cargo.lock | 26 +- .../skw-contract-sdk/src/environment/env.rs | 27 - .../src/environment/mock/external.rs | 97 +- .../src/environment/mock/mocked_blockchain.rs | 26 - .../src/environment/mock/mod.rs | 2 +- .../skw-contract-sdk/src/promise.rs | 52 - .../src/test_utils/context.rs | 8 +- skw-contract-sdk/skw-sdk-sim/Cargo.toml | 10 +- skw-contract-sdk/skw-sdk-sim/src/lib.rs | 6 +- skw-contract-sdk/skw-sdk-sim/src/runtime.rs | 121 +- skw-contract-sdk/skw-sdk-sim/src/user.rs | 49 +- skw-contract-sdk/sys/src/lib.rs | 8 - 43 files changed, 10286 insertions(+), 783 deletions(-) delete mode 100644 mock-enclave/src/skw-vm-store/README.md create mode 100644 mock-enclave/src/skw-vm-store/benches/store_bench.rs create mode 100644 mock-enclave/src/skw-vm-store/benches/trie_bench.rs create mode 100644 mock-enclave/src/skw-vm-store/src/db.rs create mode 100644 mock-enclave/src/skw-vm-store/src/db/refcount.rs create mode 100644 mock-enclave/src/skw-vm-store/src/db/v6_to_v7.rs delete mode 100644 mock-enclave/src/skw-vm-store/src/errors.rs create mode 100644 mock-enclave/src/skw-vm-store/src/migrations.rs create mode 100644 mock-enclave/src/skw-vm-store/src/migrations/v6_to_v7.rs create mode 100644 mock-enclave/src/skw-vm-store/src/migrations/v8_to_v9.rs create mode 100644 mock-enclave/src/skw-vm-store/src/test_utils.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/insert_delete.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/iterator.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/mod.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/nibble_slice.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/shard_tries.rs create mode 100644 
mock-enclave/src/skw-vm-store/src/trie/split_state.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/state_parts.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/trie_storage.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/trie_tests.rs create mode 100644 mock-enclave/src/skw-vm-store/src/trie/update.rs diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 049a1cd..17e529f 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -34,14 +34,14 @@ jobs: run: | yarn blockchain:ci - - name: Check Contract SDK Compile - run: | - yarn contract-sdk:test - - name: Check Enclave Compile run: | yarn enclave:ci - + - name: Check Mock-Enclave VM Compile run: | yarn mock-enclave:test + + - name: Check Contract SDK Compile + run: | + yarn contract-sdk:test diff --git a/Cargo.toml b/Cargo.toml index 6642039..85212e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,8 @@ exclude = [ 'mock-enclave/src/skw-vm-engine', 'mock-enclave/src/skw-vm-engine-cli', 'mock-enclave/src/near-test-contracts', + 'mock-enclave/src/skw-vm-runtime', + 'mock-enclave/src/skw-vm-store', 'skw-contract-sdk/skw-contract-sdk', 'skw-contract-sdk/skw-sdk-macros', 'skw-contract-sdk/skw-sdk-sim', diff --git a/mock-enclave/src/skw-vm-primitives/src/errors.rs b/mock-enclave/src/skw-vm-primitives/src/errors.rs index 3acfcbe..57112b8 100644 --- a/mock-enclave/src/skw-vm-primitives/src/errors.rs +++ b/mock-enclave/src/skw-vm-primitives/src/errors.rs @@ -476,6 +476,30 @@ pub enum InvalidAccessKeyError { DepositWithFunctionCall, } + +/// Internal +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageError { + /// Key-value db internal failure + StorageInternalError, + /// Storage is PartialStorage and requested a missing trie node + TrieNodeMissing, + /// Either invalid state or key-value db is corrupted. + /// For PartialStorage it cannot be corrupted. + /// Error message is unreliable and for debugging purposes only. It's also probably ok to + /// panic in every place that produces this error. + /// We can check if db is corrupted by verifying everything in the state trie. 
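+    /// As a sketch only: a caller holding some node identifier (`node_hash` below is a
+    /// hypothetical stand-in, not a name from this crate) might surface corruption as
+    /// `StorageError::StorageInconsistentState(format!("node {:?} not found", node_hash))`.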
+ StorageInconsistentState(String), +} + +impl std::fmt::Display for StorageError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + f.write_str(&format!("{:?}", self)) + } +} + +impl std::error::Error for StorageError {} + impl Display for InvalidAccessKeyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { match self { diff --git a/mock-enclave/src/skw-vm-runtime/src/ext.rs b/mock-enclave/src/skw-vm-runtime/src/ext.rs index 17fefa2..85c001d 100644 --- a/mock-enclave/src/skw-vm-runtime/src/ext.rs +++ b/mock-enclave/src/skw-vm-runtime/src/ext.rs @@ -240,10 +240,10 @@ impl<'a> External for RuntimeExt<'a> { Ok(new_receipt_index) } - fn append_action_create_account(&mut self, receipt_index: u64) -> ExtResult<()> { - self.append_action(receipt_index, Action::CreateAccount(CreateAccountAction {})); - Ok(()) - } + // fn append_action_create_account(&mut self, receipt_index: u64) -> ExtResult<()> { + // self.append_action(receipt_index, Action::CreateAccount(CreateAccountAction {})); + // Ok(()) + // } fn append_action_deploy_contract( &mut self, @@ -275,118 +275,118 @@ impl<'a> External for RuntimeExt<'a> { Ok(()) } - fn append_action_transfer(&mut self, receipt_index: u64, deposit: u128) -> ExtResult<()> { - self.append_action(receipt_index, Action::Transfer(TransferAction { deposit })); - Ok(()) - } - - fn append_action_stake( - &mut self, - receipt_index: u64, - stake: u128, - public_key: Vec<u8>, - ) -> ExtResult<()> { - self.append_action( - receipt_index, - Action::Stake(StakeAction { - stake, - public_key: PublicKey::try_from_slice(&public_key) - .map_err(|_| HostError::InvalidPublicKey)?, - }), - ); - Ok(()) - } - - fn append_action_add_key_with_full_access( - &mut self, - receipt_index: u64, - public_key: Vec<u8>, - nonce: u64, - ) -> ExtResult<()> { - self.append_action( - receipt_index, - Action::AddKey(AddKeyAction { - public_key: PublicKey::try_from_slice(&public_key) - .map_err(|_| HostError::InvalidPublicKey)?, - access_key: AccessKey { nonce, permission: AccessKeyPermission::FullAccess }, - }), - ); - Ok(()) - } - - fn append_action_add_key_with_function_call( - &mut self, - receipt_index: u64, - public_key: Vec<u8>, - nonce: u64, - allowance: Option<u128>, - receiver_id: AccountId, - method_names: Vec<Vec<u8>>, - ) -> ExtResult<()> { - self.append_action( - receipt_index, - Action::AddKey(AddKeyAction { - public_key: PublicKey::try_from_slice(&public_key) - .map_err(|_| HostError::InvalidPublicKey)?, - access_key: AccessKey { - nonce, - permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { - allowance, - receiver_id: receiver_id.into(), - method_names: method_names - .into_iter() - .map(|method_name| { - String::from_utf8(method_name) - .map_err(|_| HostError::InvalidMethodName) - }) - .collect::<Result<Vec<_>, _>>()?, - }), - }, - }), - ); - Ok(()) - } - - fn append_action_delete_key( - &mut self, - receipt_index: u64, - public_key: Vec<u8>, - ) -> ExtResult<()> { - self.append_action( - receipt_index, - Action::DeleteKey(DeleteKeyAction { - public_key: PublicKey::try_from_slice(&public_key) - .map_err(|_| HostError::InvalidPublicKey)?, - }), - ); - Ok(()) - } - - fn append_action_delete_account( - &mut self, - receipt_index: u64, - beneficiary_id: AccountId, - ) -> ExtResult<()> { - self.append_action( - receipt_index, - Action::DeleteAccount(DeleteAccountAction { beneficiary_id }), - ); - Ok(()) - } + // fn append_action_transfer(&mut self, receipt_index: u64, deposit: u128) -> ExtResult<()> { + // self.append_action(receipt_index, 
Action::Transfer(TransferAction { deposit })); + // Ok(()) + // } + + // fn append_action_stake( + // &mut self, + // receipt_index: u64, + // stake: u128, + // public_key: Vec<u8>, + // ) -> ExtResult<()> { + // self.append_action( + // receipt_index, + // Action::Stake(StakeAction { + // stake, + // public_key: PublicKey::try_from_slice(&public_key) + // .map_err(|_| HostError::InvalidPublicKey)?, + // }), + // ); + // Ok(()) + // } + + // fn append_action_add_key_with_full_access( + // &mut self, + // receipt_index: u64, + // public_key: Vec<u8>, + // nonce: u64, + // ) -> ExtResult<()> { + // self.append_action( + // receipt_index, + // Action::AddKey(AddKeyAction { + // public_key: PublicKey::try_from_slice(&public_key) + // .map_err(|_| HostError::InvalidPublicKey)?, + // access_key: AccessKey { nonce, permission: AccessKeyPermission::FullAccess }, + // }), + // ); + // Ok(()) + // } + + // fn append_action_add_key_with_function_call( + // &mut self, + // receipt_index: u64, + // public_key: Vec<u8>, + // nonce: u64, + // allowance: Option<u128>, + // receiver_id: AccountId, + // method_names: Vec<Vec<u8>>, + // ) -> ExtResult<()> { + // self.append_action( + // receipt_index, + // Action::AddKey(AddKeyAction { + // public_key: PublicKey::try_from_slice(&public_key) + // .map_err(|_| HostError::InvalidPublicKey)?, + // access_key: AccessKey { + // nonce, + // permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + // allowance, + // receiver_id: receiver_id.into(), + // method_names: method_names + // .into_iter() + // .map(|method_name| { + // String::from_utf8(method_name) + // .map_err(|_| HostError::InvalidMethodName) + // }) + // .collect::<Result<Vec<_>, _>>()?, + // }), + // }, + // }), + // ); + // Ok(()) + // } + + // fn append_action_delete_key( + // &mut self, + // receipt_index: u64, + // public_key: Vec<u8>, + // ) -> ExtResult<()> { + // self.append_action( + // receipt_index, + // Action::DeleteKey(DeleteKeyAction { + // public_key: PublicKey::try_from_slice(&public_key) + // .map_err(|_| HostError::InvalidPublicKey)?, + // }), + // ); + // Ok(()) + // } + + // fn append_action_delete_account( + // &mut self, + // receipt_index: u64, + // beneficiary_id: AccountId, + // ) -> ExtResult<()> { + // self.append_action( + // receipt_index, + // Action::DeleteAccount(DeleteAccountAction { beneficiary_id }), + // ); + // Ok(()) + // } fn get_touched_nodes_count(&self) -> u64 { self.trie_update.trie.counter.get() } - fn validator_stake(&self, account_id: &AccountId) -> ExtResult<Option<u128>> { - self.epoch_info_provider - .validator_stake(self.epoch_id, self.prev_block_hash, account_id) - .map_err(|e| ExternalError::ValidatorError(e).into()) - } - - fn validator_total_stake(&self) -> ExtResult<u128> { - self.epoch_info_provider - .validator_total_stake(self.epoch_id, self.prev_block_hash) - .map_err(|e| ExternalError::ValidatorError(e).into()) - } + // fn validator_stake(&self, account_id: &AccountId) -> ExtResult<Option<u128>> { + // self.epoch_info_provider + // .validator_stake(self.epoch_id, self.prev_block_hash, account_id) + // .map_err(|e| ExternalError::ValidatorError(e).into()) + // } + + // fn validator_total_stake(&self) -> ExtResult<u128> { + // self.epoch_info_provider + // .validator_total_stake(self.epoch_id, self.prev_block_hash) + // .map_err(|e| ExternalError::ValidatorError(e).into()) + // } } diff --git a/mock-enclave/src/skw-vm-runtime/src/lib.rs b/mock-enclave/src/skw-vm-runtime/src/lib.rs index 4b3b774..aa4888e 100644 --- a/mock-enclave/src/skw-vm-runtime/src/lib.rs +++ b/mock-enclave/src/skw-vm-runtime/src/lib.rs @@ 
-77,19 +77,19 @@ mod verifier; const EXPECT_ACCOUNT_EXISTS: &str = "account exists, checked above"; /// Contains information to update validators accounts at the first block of a new epoch. -#[derive(Debug)] -pub struct ValidatorAccountsUpdate { - /// Maximum stake across last 3 epochs. - pub stake_info: HashMap<AccountId, Balance>, - /// Rewards to distribute to validators. - pub validator_rewards: HashMap<AccountId, Balance>, - /// Stake proposals from the last chunk. - pub last_proposals: HashMap<AccountId, Balance>, - /// The ID of the protocol treasure account if it belongs to the current shard. - pub protocol_treasury_account_id: Option<AccountId>, - /// Accounts to slash and the slashed amount (None means everything) - pub slashing_info: HashMap<AccountId, Option<Balance>>, -} +// #[derive(Debug)] +// pub struct ValidatorAccountsUpdate { +// /// Maximum stake across last 3 epochs. +// pub stake_info: HashMap<AccountId, Balance>, +// /// Rewards to distribute to validators. +// pub validator_rewards: HashMap<AccountId, Balance>, +// /// Stake proposals from the last chunk. +// pub last_proposals: HashMap<AccountId, Balance>, +// /// The ID of the protocol treasure account if it belongs to the current shard. +// pub protocol_treasury_account_id: Option<AccountId>, +// /// Accounts to slash and the slashed amount (None means everything) +// pub slashing_info: HashMap<AccountId, Option<Balance>>, +// } #[derive(Debug)] pub struct VerificationResult { @@ -116,7 +116,7 @@ pub struct ApplyStats { pub struct ApplyResult { pub state_root: StateRoot, pub trie_changes: TrieChanges, - pub validator_proposals: Vec<ValidatorStake>, + // pub validator_proposals: Vec<ValidatorStake>, pub outgoing_receipts: Vec<Receipt>, pub outcomes: Vec<ExecutionOutcomeWithId>, pub state_changes: Vec<RawStateChangesWithTrieKey>, @@ -133,7 +133,7 @@ pub struct ActionResult { pub result: Result<ReturnData, ActionError>, pub logs: Vec<LogEntry>, pub new_receipts: Vec<Receipt>, - pub validator_proposals: Vec<ValidatorStake>, + // pub validator_proposals: Vec<ValidatorStake>, pub profile: ProfileData, } @@ -161,10 +161,10 @@ impl ActionResult { } if self.result.is_ok() { self.new_receipts.append(&mut next_result.new_receipts); - self.validator_proposals.append(&mut next_result.validator_proposals); + // self.validator_proposals.append(&mut next_result.validator_proposals); } else { self.new_receipts.clear(); - self.validator_proposals.clear(); + // self.validator_proposals.clear(); } Ok(()) } @@ -179,7 +179,7 @@ impl Default for ActionResult { result: Ok(ReturnData::None), logs: vec![], new_receipts: vec![], - validator_proposals: vec![], + // validator_proposals: vec![], profile: Default::default(), } } @@ -242,6 +242,7 @@ impl Runtime { }); let transaction = &signed_transaction.transaction; let receipt_id = create_receipt_id_from_transaction( + // TODO: remove protocol_version stuff apply_state.current_protocol_version, signed_transaction, &apply_state.prev_block_hash, @@ -273,7 +274,9 @@ impl Runtime { executor_id: transaction.signer_id.clone(), // TODO: profile data is only counted in apply_action, which only happened at process_receipt // VerificationResult needs updates to incorporate profile data to support profile data of txns - metadata: ExecutionMetadata::V1, + // metadata: ExecutionMetadata::V1, + // DIFF: added ProfileData in sync with skw-vm-primitives + profile_data: ProfileData, }, }; Ok((receipt, outcome)) @@ -314,36 +317,39 @@ impl Runtime { let account_id = &receipt.receiver_id; let is_the_only_action = actions.len() == 1; let is_refund = AccountId::is_system(&receipt.predecessor_id); - // Account validation - if let Err(e) = check_account_existence( - action, - account, - account_id, - apply_state.current_protocol_version, - is_the_only_action, - is_refund, - ) { - result.result = Err(e); - return Ok(result); - } - // Permission validation - if let Err(e) = 
check_actor_permissions(action, account, actor_id, account_id) { - result.result = Err(e); - return Ok(result); - } + + // TODO: correctly remove both account & permission validation + // // Account validation + // if let Err(e) = check_account_existence( + // action, + // account, + // account_id, + // apply_state.current_protocol_version, + // is_the_only_action, + // is_refund, + // ) { + // result.result = Err(e); + // return Ok(result); + // } + // // Permission validation + // if let Err(e) = check_actor_permissions(action, account, actor_id, account_id) { + // result.result = Err(e); + // return Ok(result); + // } + match action { - Action::CreateAccount(_) => { - // metrics::ACTION_CREATE_ACCOUNT_TOTAL.inc(); - action_create_account( - &apply_state.config.transaction_costs, - &apply_state.config.account_creation_config, - account, - actor_id, - &receipt.receiver_id, - &receipt.predecessor_id, - &mut result, - ); - } + // Action::CreateAccount(_) => { + // // metrics::ACTION_CREATE_ACCOUNT_TOTAL.inc(); + // action_create_account( + // &apply_state.config.transaction_costs, + // &apply_state.config.account_creation_config, + // account, + // actor_id, + // &receipt.receiver_id, + // &receipt.predecessor_id, + // &mut result, + // ); + // } Action::DeployContract(deploy_contract) => { // metrics::ACTION_DEPLOY_CONTRACT_TOTAL.inc(); action_deploy_contract( @@ -373,99 +379,99 @@ impl Runtime { epoch_info_provider, )?; } - Action::Transfer(transfer) => { - // metrics::ACTION_TRANSFER_TOTAL.inc(); - if let Some(account) = account.as_mut() { - action_transfer(account, transfer)?; - // Check if this is a gas refund, then try to refund the access key allowance. - if is_refund && action_receipt.signer_id == receipt.receiver_id { - try_refund_allowance( - state_update, - &receipt.receiver_id, - &action_receipt.signer_public_key, - transfer, - )?; - } - } else { - // Implicit account creation - debug_assert!(is_implicit_account_creation_enabled( - apply_state.current_protocol_version - )); - debug_assert!(!is_refund); - action_implicit_account_creation_transfer( - state_update, - &apply_state.config.transaction_costs, - account, - actor_id, - &receipt.receiver_id, - transfer, - apply_state.block_index, - apply_state.current_protocol_version, - ); - } - } - Action::Stake(stake) => { - // metrics::ACTION_STAKE_TOTAL.inc(); - action_stake( - account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), - &mut result, - account_id, - stake, - &apply_state.prev_block_hash, - epoch_info_provider, - #[cfg(feature = "protocol_feature_chunk_only_producers")] - false, - )?; - } - Action::AddKey(add_key) => { - // metrics::ACTION_ADD_KEY_TOTAL.inc(); - action_add_key( - apply_state, - state_update, - account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), - &mut result, - account_id, - add_key, - )?; - } - Action::DeleteKey(delete_key) => { - // metrics::ACTION_DELETE_KEY_TOTAL.inc(); - action_delete_key( - &apply_state.config.transaction_costs, - state_update, - account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), - &mut result, - account_id, - delete_key, - apply_state.current_protocol_version, - )?; - } - Action::DeleteAccount(delete_account) => { - // metrics::ACTION_DELETE_ACCOUNT_TOTAL.inc(); - action_delete_account( - state_update, - account, - actor_id, - receipt, - &mut result, - account_id, - delete_account, - apply_state.current_protocol_version, - )?; - } - #[cfg(feature = "protocol_feature_chunk_only_producers")] - Action::StakeChunkOnly(stake) => { - // metrics::ACTION_STAKE_CHUNK_ONLY_TOTAL.inc(); - action_stake( - 
account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), - &mut result, - account_id, - stake, - &apply_state.prev_block_hash, - epoch_info_provider, - true, - )?; - } + // Action::Transfer(transfer) => { + // // metrics::ACTION_TRANSFER_TOTAL.inc(); + // if let Some(account) = account.as_mut() { + // action_transfer(account, transfer)?; + // // Check if this is a gas refund, then try to refund the access key allowance. + // if is_refund && action_receipt.signer_id == receipt.receiver_id { + // try_refund_allowance( + // state_update, + // &receipt.receiver_id, + // &action_receipt.signer_public_key, + // transfer, + // )?; + // } + // } else { + // // Implicit account creation + // debug_assert!(is_implicit_account_creation_enabled( + // apply_state.current_protocol_version + // )); + // debug_assert!(!is_refund); + // action_implicit_account_creation_transfer( + // state_update, + // &apply_state.config.transaction_costs, + // account, + // actor_id, + // &receipt.receiver_id, + // transfer, + // apply_state.block_index, + // apply_state.current_protocol_version, + // ); + // } + // } + // Action::Stake(stake) => { + // // metrics::ACTION_STAKE_TOTAL.inc(); + // action_stake( + // account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), + // &mut result, + // account_id, + // stake, + // &apply_state.prev_block_hash, + // epoch_info_provider, + // #[cfg(feature = "protocol_feature_chunk_only_producers")] + // false, + // )?; + // } + // Action::AddKey(add_key) => { + // // metrics::ACTION_ADD_KEY_TOTAL.inc(); + // action_add_key( + // apply_state, + // state_update, + // account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), + // &mut result, + // account_id, + // add_key, + // )?; + // } + // Action::DeleteKey(delete_key) => { + // // metrics::ACTION_DELETE_KEY_TOTAL.inc(); + // action_delete_key( + // &apply_state.config.transaction_costs, + // state_update, + // account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), + // &mut result, + // account_id, + // delete_key, + // apply_state.current_protocol_version, + // )?; + // } + // Action::DeleteAccount(delete_account) => { + // // metrics::ACTION_DELETE_ACCOUNT_TOTAL.inc(); + // action_delete_account( + // state_update, + // account, + // actor_id, + // receipt, + // &mut result, + // account_id, + // delete_account, + // apply_state.current_protocol_version, + // )?; + // } + // #[cfg(feature = "protocol_feature_chunk_only_producers")] + // Action::StakeChunkOnly(stake) => { + // // metrics::ACTION_STAKE_CHUNK_ONLY_TOTAL.inc(); + // action_stake( + // account.as_mut().expect(EXPECT_ACCOUNT_EXISTS), + // &mut result, + // account_id, + // stake, + // &apply_state.prev_block_hash, + // epoch_info_provider, + // true, + // )?; + // } }; Ok(result) } @@ -1170,67 +1176,73 @@ impl Runtime { &self, trie: Trie, root: CryptoHash, - validator_accounts_update: &Option<ValidatorAccountsUpdate>, + // validator_accounts_update: &Option<ValidatorAccountsUpdate>, apply_state: &ApplyState, incoming_receipts: &[Receipt], transactions: &[SignedTransaction], - epoch_info_provider: &dyn EpochInfoProvider, - states_to_patch: Option<Vec<StateRecord>>, + // epoch_info_provider: &dyn EpochInfoProvider, + // states_to_patch: Option<Vec<StateRecord>>, ) -> Result<ApplyResult, RuntimeError> { let _span = tracing::debug_span!(target: "runtime", "Runtime::apply").entered(); - if states_to_patch.is_some() && !cfg!(feature = "sandbox") { - panic!("Can only patch state in sandbox mode"); - } + // if states_to_patch.is_some() && !cfg!(feature = "sandbox") { + // panic!("Can only patch state in sandbox mode"); + // } let trie = Rc::new(trie); + + // TODO: maybe we can use StoreUpdate instead of TrieUpdate 
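+        // A minimal sketch of the StoreUpdate-based alternative this TODO points at,
+        // assuming the near-store API being ported into skw-vm-store
+        // (`store_update()`, `set_ser`, `commit()` and `DBCol::ColState` are names
+        // carried over from nearcore, i.e. assumptions, not code in this crate yet):
+        //
+        //     let mut store_update = store.store_update();
+        //     store_update.set_ser(DBCol::ColState, key, &account)?;
+        //     store_update.commit()?;
+        //
+        // TrieUpdate is kept for now because apply() must produce a new state root
+        // and recorded trie nodes, which a raw StoreUpdate batch does not track.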
let initial_state = TrieUpdate::new(trie.clone(), root); let mut state_update = TrieUpdate::new(trie.clone(), root); let mut stats = ApplyStats::default(); - if let Some(validator_accounts_update) = validator_accounts_update { - self.update_validator_accounts( - &mut state_update, - validator_accounts_update, - &mut stats, - )?; - } - - let (gas_used_for_migrations, mut receipts_to_restore) = self - .apply_migrations( - &mut state_update, - &apply_state.migration_data, - &apply_state.migration_flags, - apply_state.current_protocol_version, - ) - .map_err(RuntimeError::StorageError)?; - // If we have receipts that need to be restored, prepend them to the list of incoming receipts - let incoming_receipts = if receipts_to_restore.is_empty() { - incoming_receipts - } else { - receipts_to_restore.extend_from_slice(incoming_receipts); - receipts_to_restore.as_slice() - }; - - if !apply_state.is_new_chunk - && apply_state.current_protocol_version - >= ProtocolFeature::FixApplyChunks.protocol_version() - { - let (trie_changes, state_changes) = state_update.finalize()?; - let proof = trie.recorded_storage(); - return Ok(ApplyResult { - state_root: trie_changes.new_root, - trie_changes, - validator_proposals: vec![], - outgoing_receipts: vec![], - outcomes: vec![], - state_changes, - stats, - processed_delayed_receipts: vec![], - proof, - }); - } + // TODO: remove + // if let Some(validator_accounts_update) = validator_accounts_update { + // self.update_validator_accounts( + // &mut state_update, + // validator_accounts_update, + // &mut stats, + // )?; + // } + + + // TODO: keep but not used for now - migration stuff + // let (gas_used_for_migrations, mut receipts_to_restore) = self + // .apply_migrations( + // &mut state_update, + // &apply_state.migration_data, + // &apply_state.migration_flags, + // apply_state.current_protocol_version, + // ) + // .map_err(RuntimeError::StorageError)?; + // // If we have receipts that need to be restored, prepend them to the list of incoming receipts + // let incoming_receipts = if receipts_to_restore.is_empty() { + // incoming_receipts + // } else { + // receipts_to_restore.extend_from_slice(incoming_receipts); + // receipts_to_restore.as_slice() + // }; + + // NOTE: not sure what this is ... 
might have something to do w/ chain consensus + // if !apply_state.is_new_chunk + // && apply_state.current_protocol_version + // >= ProtocolFeature::FixApplyChunks.protocol_version() + // { + // let (trie_changes, state_changes) = state_update.finalize()?; + // let proof = trie.recorded_storage(); + // return Ok(ApplyResult { + // state_root: trie_changes.new_root, + // trie_changes, + // validator_proposals: vec![], + // outgoing_receipts: vec![], + // outcomes: vec![], + // state_changes, + // stats, + // processed_delayed_receipts: vec![], + // proof, + // }); + // } let mut outgoing_receipts = Vec::new(); let mut validator_proposals = vec![]; diff --git a/mock-enclave/src/skw-vm-runtime/src/verifier.rs b/mock-enclave/src/skw-vm-runtime/src/verifier.rs index d674f5e..1bb9bff 100644 --- a/mock-enclave/src/skw-vm-runtime/src/verifier.rs +++ b/mock-enclave/src/skw-vm-runtime/src/verifier.rs @@ -32,7 +32,7 @@ pub fn validate_transaction( gas_price: Balance, signed_transaction: &SignedTransaction, verify_signature: bool, - current_protocol_version: ProtocolVersion, + // current_protocol_version: ProtocolVersion, ) -> Result { let transaction = &signed_transaction.transaction; let signer_id = &transaction.signer_id; diff --git a/mock-enclave/src/skw-vm-store/Cargo.lock b/mock-enclave/src/skw-vm-store/Cargo.lock index 820e944..8a303d6 100644 --- a/mock-enclave/src/skw-vm-store/Cargo.lock +++ b/mock-enclave/src/skw-vm-store/Cargo.lock @@ -2,12 +2,107 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "ahash" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" + +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.4", + "once_cell", + "version_check", +] + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + [[package]] name = "autocfg" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + +[[package]] +name = "bencher" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfdb4953a096c551ce9ace855a604d702e6e62d77fac690575ae347571717f5" + +[[package]] +name = "bindgen" +version = "0.59.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] + 
+[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" +dependencies = [ + "crypto-mac", + "digest", + "opaque-debug", +] + [[package]] name = "block-buffer" version = "0.9.0" @@ -17,12 +112,155 @@ dependencies = [ "generic-array", ] +[[package]] +name = "borsh" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18dda7dc709193c0d86a1a51050a926dc3df1cf262ec46a23a25dba421ea1924" +dependencies = [ + "borsh-derive", + "hashbrown 0.9.1", +] + +[[package]] +name = "borsh-derive" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "684155372435f578c0fa1acd13ebbb182cc19d6b38b64ae7901da4393217d264" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2102f62f8b6d3edeab871830782285b64cc1830168094db05c8e458f209bc5c3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196c978c4c9b0b142d446ef3240690bf5a8a33497074a113ff9a337ccb750483" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + +[[package]] +name = "byte-slice-cast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c751592b77c499e7bce34d99d67c2c11bdc0574e9a488ddade14150a4698" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytesize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c58ec36aac5066d5ca17df51b3e70279f5670a72102f5752cb7e7c856adfc70" + +[[package]] +name = "c2-chacha" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d27dae93fe7b1e0424dc57179ac396908c26b035a87234809f5c4dfd1b47dc80" +dependencies = [ + "cipher", + "ppv-lite86", +] + +[[package]] +name = "cc" +version = "1.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "serde", + "time", + "winapi", +] + +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array", +] + +[[package]] +name = "clang-sys" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "cpufeatures" version = "0.2.1" @@ -32,6 +270,48 @@ dependencies = [ "libc", ] +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "curve25519-dalek" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +dependencies = [ + "byteorder", + "digest", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn", +] + [[package]] name = "digest" version = "0.9.0" @@ -42,190 +322,1069 @@ dependencies = [ ] [[package]] -name = "downcast-rs" -version = "1.2.0" +name = "easy-ext" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +checksum = "53aff6fdc1b181225acdcb5b14c47106726fd8e486707315b1b138baed68ee31" [[package]] -name = "generic-array" -version = "0.14.5" +name = "ed25519" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" dependencies = [ - "typenum", - "version_check", + "signature", ] [[package]] -name = "libc" -version = "0.2.112" +name = "ed25519-dalek" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" +checksum = 
"c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand 0.7.3", + "serde", + "sha2", + "zeroize", +] [[package]] -name = "libm" -version = "0.2.1" +name = "elastic-array" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" +checksum = "0d63720ea2bc2e1b79f7aa044d9dc0b825f9ccb6930b32120f8fb9e873aa84bc" +dependencies = [ + "heapsize", +] [[package]] -name = "memory_units" -version = "0.4.0" +name = "fastrand" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] [[package]] -name = "num-bigint" -version = "0.4.3" +name = "fixed-hash" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ - "autocfg", - "num-integer", - "num-traits", + "byteorder", + "rand 0.8.4", + "rustc-hex", + "static_assertions", ] [[package]] -name = "num-integer" -version = "0.1.44" +name = "fs2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ - "autocfg", - "num-traits", + "libc", + "winapi", ] [[package]] -name = "num-rational" -version = "0.3.2" +name = "funty" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + +[[package]] +name = "generic-array" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ - "autocfg", - "num-integer", - "num-traits", + "typenum", + "version_check", ] [[package]] -name = "num-rational" -version = "0.4.0" +name = "getrandom" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] -name = "num-traits" -version = "0.2.14" +name = "getrandom" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ - "autocfg", + "cfg-if 1.0.0", + "libc", + "wasi 0.10.3+wasi-snapshot-preview1", ] [[package]] -name = "opaque-debug" +name = "glob" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] -name = "parity-wasm" -version = "0.42.2" +name = 
"hashbrown" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +dependencies = [ + "ahash 0.4.7", +] [[package]] -name = "serde" -version = "1.0.133" +name = "hashbrown" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + "ahash 0.7.6", +] [[package]] -name = "sha2" -version = "0.9.9" +name = "hashbrown" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" dependencies = [ - "block-buffer", - "cfg-if", - "cpufeatures", - "digest", - "opaque-debug", + "ahash 0.7.6", ] [[package]] -name = "skw-vm-primitives" -version = "0.1.0" +name = "heapsize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461" dependencies = [ - "num-rational 0.3.2", - "serde", - "sha2", - "wasmi", + "winapi", ] [[package]] -name = "spin" -version = "0.9.2" +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] [[package]] -name = "typenum" -version = "1.15.0" +name = "hex" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] -name = "version_check" -version = "0.9.4" +name = "impl-codec" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec", +] [[package]] -name = "wasmi" -version = "0.11.0" -source = "git+https://github.com/paritytech/wasmi#ad3e00d5f83e4ff4c32e7c883d1eb9ae24f6f2ba" +name = "impl-trait-for-tuples" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" dependencies = [ - "parity-wasm", - "wasmi-validation", - "wasmi_core", - "wasmi_v1", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "wasmi-validation" -version = "0.4.1" -source = "git+https://github.com/paritytech/wasmi#ad3e00d5f83e4ff4c32e7c883d1eb9ae24f6f2ba" +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "parity-wasm", + "cfg-if 1.0.0", ] [[package]] -name = "wasmi_core" -version = "0.1.0" -source 
= "git+https://github.com/paritytech/wasmi#ad3e00d5f83e4ff4c32e7c883d1eb9ae24f6f2ba" +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + +[[package]] +name = "jobserver" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" dependencies = [ - "downcast-rs", - "libm", - "memory_units", - "num-rational 0.4.0", - "num-traits", - "parity-wasm", + "libc", ] [[package]] -name = "wasmi_v1" -version = "0.11.0" -source = "git+https://github.com/paritytech/wasmi#ad3e00d5f83e4ff4c32e7c883d1eb9ae24f6f2ba" +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.112" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" + +[[package]] +name = "libloading" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "6.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +dependencies = [ + "bindgen", + "cc", + "glob", + "libc", +] + +[[package]] +name = "lru" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +dependencies = [ + "hashbrown 0.11.2", +] + +[[package]] +name = "memchr" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "near-account-id" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "borsh", + "serde", +] + +[[package]] +name = "near-crypto" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "arrayref", + "blake2", + "borsh", + "bs58", + "c2-chacha", + "curve25519-dalek", + "derive_more", + "ed25519-dalek", + "libc", + "near-account-id", + "once_cell", + "parity-secp256k1", + "primitive-types", + "rand 0.7.3", + "rand_core 0.5.1", + "serde", + "serde_json", + "subtle", + "thiserror", +] + +[[package]] +name = "near-primitives" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "borsh", + "byteorder", + "bytesize", + "chrono", + "derive_more", + "easy-ext", + "hex", + "near-crypto", + "near-primitives-core", + "near-rpc-error-macro", + "near-vm-errors", + "num-rational", + "primitive-types", + "rand 0.7.3", + 
"reed-solomon-erasure", + "serde", + "serde_json", + "smart-default", +] + +[[package]] +name = "near-primitives-core" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "base64", + "borsh", + "bs58", + "derive_more", + "near-account-id", + "num-rational", + "serde", + "sha2", +] + +[[package]] +name = "near-rpc-error-core" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "quote", + "serde", + "syn", +] + +[[package]] +name = "near-rpc-error-macro" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "near-rpc-error-core", + "serde", + "syn", +] + +[[package]] +name = "near-vm-errors" +version = "0.0.0" +source = "git+https://github.com/near/nearcore#cb8b1d2c09f2126d4fd01f94b6bd24f54cfcf6b3" +dependencies = [ + "borsh", + "near-account-id", + "near-rpc-error-macro", + "serde", +] + +[[package]] +name = "nom" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +dependencies = [ + "memchr", + "minimal-lexical", + "version_check", +] + +[[package]] +name = "num-bigint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", + "serde", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" 
+dependencies = [ + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-secp256k1" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fca4f82fccae37e8bbdaeb949a4a218a1bbc485d11598f193d2a908042e5fc1" +dependencies = [ + "arrayvec 0.5.2", + "cc", + "cfg-if 0.1.10", + "rand 0.7.3", +] + +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "pin-project-lite" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" + +[[package]] +name = "ppv-lite86" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" + +[[package]] +name = "primitive-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-crate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" +dependencies = [ + "thiserror", + "toml", +] + +[[package]] +name = "proc-macro2" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.3", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.3", +] + +[[package]] +name = "rand_core" +version 
= "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom 0.2.4", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "redox_syscall" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] + +[[package]] +name = "reed-solomon-erasure" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" +dependencies = [ + "smallvec", +] + +[[package]] +name = "regex" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rocksdb" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "ryu" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" + +[[package]] +name = "semver" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" + +[[package]] +name = "serde" +version = "1.0.133" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.133" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" 
+dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c059c05b48c5c0067d4b4b2b4f0732dd65feb52daf7e0ea09cd87e7dadc1af79" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer", + "cfg-if 1.0.0", + "cpufeatures", + "digest", + "opaque-debug", +] + +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + +[[package]] +name = "signature" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" + +[[package]] +name = "skw-vm-store" +version = "0.0.0" +dependencies = [ + "bencher", + "borsh", + "byteorder", + "bytesize", + "derive_more", + "elastic-array", + "fs2", + "hashbrown 0.12.0", + "lru", + "near-crypto", + "near-primitives", + "num_cpus", + "rand 0.7.3", + "rocksdb", + "serde_json", + "strum", + "tempfile", + "thiserror", + "tracing", +] + +[[package]] +name = "smallvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" + +[[package]] +name = "smart-default" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "133659a15339456eeeb07572eb02a91c91e9815e9cbc89566944d2c8d3efdbf6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "subtle" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" + +[[package]] +name = "syn" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if 1.0.0", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +dependencies = [ + "cfg-if 1.0.0", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "typenum" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" + +[[package]] +name = "uint" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicode-segmentation" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.3+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46a2e384a3f170b0c7543787a91411175b71afd56ba4d3a0ae5678d4e2243c0e" + 
+[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + +[[package]] +name = "zeroize" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc222aec311c323c717f56060324f32b82da1ce1dd81d9a09aa6a9030bfe08db" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" dependencies = [ - "parity-wasm", - "spin", - "wasmi-validation", - "wasmi_core", + "proc-macro2", + "quote", + "syn", + "synstructure", ] diff --git a/mock-enclave/src/skw-vm-store/Cargo.toml b/mock-enclave/src/skw-vm-store/Cargo.toml index 5697f65..a635b88 100644 --- a/mock-enclave/src/skw-vm-store/Cargo.toml +++ b/mock-enclave/src/skw-vm-store/Cargo.toml @@ -1,25 +1,49 @@ [package] -name = "skw-vm-primitives" -version = "0.1.0" -authors = ["SkyeKiwi "] -edition = "2021" +name = "skw-vm-store" +version = "0.0.0" license = "GPL-3.0" -readme = "README.md" -categories = ["wasm"] -repository = "https://github.com/skyekiwi/skyekiwi-network" -homepage = "https://github.com/skyekiwi/skyekiwi-network" -description = """ -Primitives types for the SkyeKiwi Network Secret Contracts -""" +authors = ["SkyeKiwi ", "Near Inc "] +publish = false +edition = "2021" [dependencies] -hashbrown = "0.12" -skw-vm-primitives = { path = "../skw-vm-primitives" } +byteorder = "1.2" +bytesize = "1.1" +derive_more = "0.99.3" +elastic-array = "0.11" +rocksdb = "0.16.0" +serde_json = "1" +num_cpus = "1.11" +rand = "0.7" +strum = { version = "0.20", features = ["derive"] } +fs2 = "0.4" +tracing = "0.1" +borsh = "0.9" +thiserror = "1" +lru = "0.6.5" +hashbrown = "0.12.0" + +near-crypto = { git = "https://github.com/near/nearcore" } +near-primitives = { git = "https://github.com/near/nearcore" } + +[dev-dependencies] +tempfile = "3" +bencher = "0.1.5" +rand = "0.7" + +[[bench]] +name = "trie_bench" +harness = false + +[[bench]] +name = "store_bench" +harness = false [features] -default = ["std"] -std = [ - "sha2/std", - "num-rational/std", - "serde/std", -] +default = [] +no_cache = [] +single_thread_rocksdb = [] # Deactivate RocksDB IO background threads +test_features = [] +protocol_feature_chunk_only_producers = [] +nightly_protocol = [] +nightly_protocol_features = ["nightly_protocol", "protocol_feature_chunk_only_producers"] diff --git a/mock-enclave/src/skw-vm-store/README.md b/mock-enclave/src/skw-vm-store/README.md deleted file mode 100644 index 90c4a2d..0000000 --- a/mock-enclave/src/skw-vm-store/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# near-vm-errors - -Error that can occur inside Near 
Runtime encapsulated in a separate crate. Might merge it later. diff --git a/mock-enclave/src/skw-vm-store/benches/store_bench.rs b/mock-enclave/src/skw-vm-store/benches/store_bench.rs new file mode 100644 index 0000000..20a6727 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/benches/store_bench.rs @@ -0,0 +1,100 @@ +#[macro_use] +extern crate bencher; + +use bencher::{black_box, Bencher}; +use near_primitives::borsh::maybestd::sync::Arc; +use near_primitives::errors::StorageError; +use skw_vm_store::db::DBCol::ColBlockMerkleTree; +use skw_vm_store::{create_store, DBCol, Store}; +use std::time::{Duration, Instant}; + +/// Run a benchmark to generate `num_keys` keys, each of size `key_size`, then write them +/// in random order to column `col` in store, and then read keys back from `col` in random order. +/// Works only for columns configured without reference counting, that is `.is_rc() == false`. +fn benchmark_write_then_read_successful( + bench: &mut Bencher, + num_keys: usize, + key_size: usize, + max_value_size: usize, + col: DBCol, +) { + let store = create_store_in_random_folder(); + let keys = generate_keys(num_keys, key_size); + write_to_db(&store, &keys, max_value_size, col); + + bench.iter(move || { + let start = Instant::now(); + + let read_records = read_from_db(&store, &keys, col); + let took = start.elapsed(); + println!( + "took on avg {:?} op per sec {} got {}/{}", + took / (num_keys as u32), + (num_keys as u128) * Duration::from_secs(1).as_nanos() / took.as_nanos(), + read_records, + keys.len() + ); + }); +} + +/// Create `Store` in a random folder. +fn create_store_in_random_folder() -> Arc<Store> { + let tmp_dir = tempfile::Builder::new().prefix("_test_clear_column").tempdir().unwrap(); + let store = create_store(tmp_dir.path()); + store +} + +/// Generate `count` keys of `key_size` length. +fn generate_keys(count: usize, key_size: usize) -> Vec<Vec<u8>> { + let mut res: Vec<Vec<u8>> = Vec::new(); + for _k in 0..count { + let key: Vec<u8> = (0..key_size).map(|_| rand::random::<u8>()).collect(); + + res.push(key) + } + res +} + +/// Read values for the given `keys` in random order from `col`. +/// Works only for columns configured without reference counting, that is `.is_rc() == false`. +fn read_from_db(store: &Arc<Store>, keys: &Vec<Vec<u8>>, col: DBCol) -> usize { + let mut read = 0; + for _k in 0..keys.len() { + let r = rand::random::<u32>() % (keys.len() as u32); + let key = &keys[r as usize]; + + let val = store.get(col, key.as_ref()).map_err(|_| StorageError::StorageInternalError); + + if let Ok(Some(x)) = val { + black_box(x); + read += 1; + } + } + read +} + +/// Write a random value of size between `0` and `max_value_size` to each of the given `keys` at the specific +/// column `col`. +/// Works only for columns configured without reference counting, that is `.is_rc() == false`. +fn write_to_db(store: &Arc<Store>, keys: &[Vec<u8>], max_value_size: usize, col: DBCol) { + let mut store_update = store.store_update(); + for key in keys.iter() { + let x: usize = rand::random::<usize>() % max_value_size; + let val: Vec<u8> = (0..x).map(|_| rand::random::<u8>()).collect(); + // NOTE: this `clone()` only copies the `&[u8]` reference, not the key bytes. + store_update.set(col, key.as_slice().clone(), &val); + } + store_update.commit().unwrap(); +} + +fn benchmark_write_then_read_successful_10m(bench: &mut Bencher) { + // By adding logs, I've seen a lot of writes to keys of size 40, and values with sizes + // between 10 and 333. + // NOTE: ColBlockMerkleTree was chosen to be a column, where `.is_rc() == false`.
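+ // (Reference-counted columns append a refcount to every stored value, so the plain + // `set`/`get` round-trip used by this benchmark would not work on them.)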
+ // benchmark_write_then_read_successful(bench, 10_000_000, 40, 333, ColBlockMerkleTree); + benchmark_write_then_read_successful(bench, 10_000, 40, 333, ColBlockMerkleTree); +} + +benchmark_group!(benches, benchmark_write_then_read_successful_10m); + +benchmark_main!(benches); diff --git a/mock-enclave/src/skw-vm-store/benches/trie_bench.rs b/mock-enclave/src/skw-vm-store/benches/trie_bench.rs new file mode 100644 index 0000000..97d6750 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/benches/trie_bench.rs @@ -0,0 +1,53 @@ +#[macro_use] +extern crate bencher; + +use bencher::Bencher; +use rand::random; + +use near_primitives::shard_layout::ShardUId; +use skw_vm_store::test_utils::create_tries; +use skw_vm_store::Trie; + +fn rand_bytes() -> Vec<u8> { + (0..10).map(|_| random::<u8>()).collect() +} + +fn trie_lookup(bench: &mut Bencher) { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let root = Trie::empty_root(); + let mut changes = vec![]; + for _ in 0..100 { + changes.push((rand_bytes(), Some(rand_bytes()))); + } + let other_changes = changes.clone(); + let trie_changes = trie.update(&root, changes.drain(..)).unwrap(); + let (state_update, root) = tries.apply_all(&trie_changes, ShardUId::single_shard()).unwrap(); + state_update.commit().expect("Failed to commit"); + + bench.iter(|| { + for _ in 0..1 { + for (key, _) in other_changes.iter() { + trie.get(&root, key).unwrap(); + } + } + }); +} + +fn trie_update(bench: &mut Bencher) { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let root = Trie::empty_root(); + let mut changes = vec![]; + for _ in 0..100 { + changes.push((rand_bytes(), Some(rand_bytes()))); + } + + bench.iter(|| { + let mut this_changes = changes.clone(); + let _ = trie.update(&root, this_changes.drain(..)); + }); +} + +benchmark_group!(benches, trie_lookup, trie_update); +benchmark_main!(benches); diff --git a/mock-enclave/src/skw-vm-store/src/db.rs b/mock-enclave/src/skw-vm-store/src/db.rs new file mode 100644 index 0000000..ec1259e --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/db.rs @@ -0,0 +1,966 @@ +#[cfg(not(feature = "single_thread_rocksdb"))] +use std::cmp; +use std::collections::HashMap; +use std::io; +use std::marker::PhantomPinned; +use std::sync::RwLock; + +use borsh::{BorshDeserialize, BorshSerialize}; +use rocksdb::{ + BlockBasedOptions, Cache, ColumnFamily, ColumnFamilyDescriptor, Direction, Env, IteratorMode, + Options, ReadOptions, WriteBatch, DB, +}; +use strum::EnumIter; +use tracing::warn; + +use near_primitives::version::DbVersion; + +use crate::db::refcount::merge_refcounted_records; + +use std::path::Path; +use std::sync::atomic::Ordering; + +pub(crate) mod refcount; +pub(crate) mod v6_to_v7; + +#[derive(Debug, Clone, PartialEq)] +pub struct DBError(rocksdb::Error); + +impl std::fmt::Display for DBError { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + self.0.fmt(formatter) + } +} + +impl std::error::Error for DBError {} + +impl From<rocksdb::Error> for DBError { + fn from(err: rocksdb::Error) -> Self { + DBError(err) + } +} + +impl Into<io::Error> for DBError { + fn into(self) -> io::Error { + io::Error::new(io::ErrorKind::Other, self) + } +} + +/// This enum holds the information about the columns that we use within the RocksDB storage. +/// You can think of our storage as a 2-dimensional table (with key and column as indexes/coordinates). +// TODO(mm-near): add info about the RC in the columns.
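+/// For example, addressing one cell of that table looks like this (an illustrative sketch only, not part of the ported code: `Store`/`StoreUpdate` are defined in `lib.rs` later in this diff, and the path is made up): +/// ```ignore +/// let store = create_store(std::path::Path::new("/tmp/db")); +/// let mut update = store.store_update(); +/// update.set(DBCol::ColBlockMisc, b"HEAD", b"block-hash-bytes"); +/// update.commit()?; +/// assert_eq!(store.get(DBCol::ColBlockMisc, b"HEAD")?, Some(b"block-hash-bytes".to_vec())); +/// ```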
+#[derive(PartialEq, Debug, Copy, Clone, EnumIter, BorshDeserialize, BorshSerialize, Hash, Eq)] +pub enum DBCol { + /// Column to indicate which version of database this is. + /// - *Rows*: single row [VERSION_KEY] + /// - *Content type*: The version of the database (u32), serialized as JSON. + ColDbVersion = 0, + /// Column that stores Misc cells. + /// - *Rows*: multiple, for example "GENESIS_JSON_HASH", "HEAD_KEY", [LATEST_KNOWN_KEY] etc. + /// - *Content type*: cell specific. + ColBlockMisc = 1, + /// Column that stores Block content. + /// - *Rows*: block hash (CryptoHash) + /// - *Content type*: [near_primitives::block::Block] + ColBlock = 2, + /// Column that stores Block headers. + /// - *Rows*: block hash (CryptoHash) + /// - *Content type*: [near_primitives::block_header::BlockHeader] + ColBlockHeader = 3, + /// Column that stores mapping from block height to block hash. + /// - *Rows*: height (u64) + /// - *Content type*: block hash (CryptoHash) + ColBlockHeight = 4, + /// Column that stores the Trie state. + /// - *Rows*: trie_node_or_value_hash (CryptoHash) + /// - *Content type*: Serialized RawTrieNodeWithSize or value () + ColState = 5, + /// Mapping from BlockChunk to ChunkExtra + /// - *Rows*: BlockChunk (block_hash, shard_uid) + /// - *Content type*: [near_primitives::types::ChunkExtra] + ColChunkExtra = 6, + /// Mapping from transaction outcome id (CryptoHash) to list of outcome ids with proofs. + /// - *Rows*: outcome id (CryptoHash) + /// - *Content type*: Vec of [near_primitives::transactions::ExecutionOutcomeWithIdAndProof] + ColTransactionResult = 7, + /// Mapping from Block + Shard to list of outgoing receipts. + /// - *Rows*: block + shard + /// - *Content type*: Vec of [near_primitives::receipt::Receipt] + ColOutgoingReceipts = 8, + /// Mapping from Block + Shard to list of incoming receipt proofs. + /// Each proof might prove multiple receipts. + /// - *Rows*: (block, shard) + /// - *Content type*: Vec of [near_primitives::sharding::ReceiptProof] + ColIncomingReceipts = 9, + /// Info about the peers that we are connected to. Mapping from peer_id to KnownPeerState. + /// - *Rows*: peer_id (PublicKey) + /// - *Content type*: [network_primitives::types::KnownPeerState] + ColPeers = 10, + /// Mapping from EpochId to EpochInfo + /// - *Rows*: EpochId (CryptoHash) + /// - *Content type*: [near_primitives::epoch_manager::EpochInfo] + ColEpochInfo = 11, + /// Mapping from BlockHash to BlockInfo + /// - *Rows*: BlockHash (CryptoHash) + /// - *Content type*: [near_primitives::epoch_manager::BlockInfo] + ColBlockInfo = 12, + /// Mapping from ChunkHash to ShardChunk. + /// - *Rows*: ChunkHash (CryptoHash) + /// - *Content type*: [near_primitives::sharding::ShardChunk] + ColChunks = 13, + /// Storage for PartialEncodedChunk. + /// - *Rows*: ChunkHash (CryptoHash) + /// - *Content type*: [near_primitives::sharding::PartialEncodedChunkV1] + ColPartialChunks = 14, + /// Blocks for which chunks need to be applied after the state is downloaded for a particular epoch + /// TODO: describe what is exactly inside the rows/cells. + ColBlocksToCatchup = 15, + /// Blocks for which the state is being downloaded + ColStateDlInfos = 16, + ColChallengedBlocks = 17, + ColStateHeaders = 18, + ColInvalidChunks = 19, + ColBlockExtra = 20, + /// Store hash of a block per each height, to detect double signs.
+ ColBlockPerHeight = 21, + ColStateParts = 22, + ColEpochStart = 23, + /// Map account_id to announce_account + ColAccountAnnouncements = 24, + /// Next block hashes in the sequence of the canonical chain blocks + ColNextBlockHashes = 25, + /// `LightClientBlock`s corresponding to the last final block of each completed epoch + ColEpochLightClientBlocks = 26, + ColReceiptIdToShardId = 27, + // Deprecated. + _ColNextBlockWithNewChunk = 28, + // Deprecated. + _ColLastBlockWithNewChunk = 29, + /// Network storage: + /// When a given edge is removed (or we didn't get any ping from it for a while), we remove it from our 'in memory' + /// view and persist it into storage. + /// + /// This is done so that we prevent the attack where someone tries to reintroduce the edge/peer into the network, + /// but with the 'old' nonce. + /// + /// When we write things to storage, we do it in groups (here they are called 'components') - this naming is a little bit + /// unfortunate, as the peers/edges that we persist don't need to be connected or form any other 'component' (in a graph theory sense). + /// + /// Each such component gets a new identifier (here called 'nonce'). + /// + /// We store this info in the three columns below: + /// - LastComponentNonce: keeps info on what is the next identifier (nonce) that can be used. + /// - PeerComponent: keeps information on the mapping from a peer to the last component that it belonged to (so that if a new peer shows + /// up we know which 'component' to load) + /// - ComponentEdges: keeps the info about the edges that were connecting those peers that were removed. + + /// Map each saved peer on disk with its component id (a.k.a. nonce). + /// - *Rows*: peer_id + /// - *Column type*: (nonce) u64 + ColPeerComponent = 30, + /// Map component id (a.k.a. nonce) with all edges in this component. + /// These are all the edges that were purged and persisted to disk at the same time. + /// - *Rows*: nonce + /// - *Column type*: `Vec<Edge>` + ColComponentEdges = 31, + /// Biggest component id (a.k.a. nonce) used. + /// - *Rows*: single row (empty row name) + /// - *Column type*: (nonce) u64 + ColLastComponentNonce = 32, + /// Map of transactions + /// - *Rows*: transaction hash + /// - *Column type*: SignedTransaction + ColTransactions = 33, + ColChunkPerHeightShard = 34, + /// Changes to key-values that we have recorded. + ColStateChanges = 35, + ColBlockRefCount = 36, + ColTrieChanges = 37, + /// Merkle tree of block hashes + ColBlockMerkleTree = 38, + ColChunkHashesByHeight = 39, + /// Block ordinals. + ColBlockOrdinal = 40, + /// GC Count for each column + ColGCCount = 41, + /// All Outcome ids by block hash and shard id. For each shard it is ordered by execution order.
+ ColOutcomeIds = 42, + /// Deprecated + _ColTransactionRefCount = 43, + /// Heights of blocks that have been processed + ColProcessedBlockHeights = 44, + /// Receipts + ColReceipts = 45, + /// Precompiled machine code of the contract + ColCachedContractCode = 46, + /// Epoch validator information used for rpc purposes + ColEpochValidatorInfo = 47, + /// Header Hashes indexed by Height + ColHeaderHashesByHeight = 48, + /// State changes made by a chunk, used for splitting states + ColStateChangesForSplitStates = 49, +} + +// Do not move this line away from enum DBCol: NUM_COLS must match the number of variants above. +pub const NUM_COLS: usize = 50; + +impl std::fmt::Display for DBCol { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + let desc = match self { + Self::ColDbVersion => "db version", + Self::ColBlockMisc => "miscellaneous block data", + Self::ColBlock => "block data", + Self::ColBlockHeader => "block header data", + Self::ColBlockHeight => "block height", + Self::ColState => "blockchain state", + Self::ColChunkExtra => "extra information of chunk", + Self::ColTransactionResult => "transaction results", + Self::ColOutgoingReceipts => "outgoing receipts", + Self::ColIncomingReceipts => "incoming receipts", + Self::ColPeers => "peer information", + Self::ColEpochInfo => "epoch information", + Self::ColBlockInfo => "block information", + Self::ColChunks => "chunks", + Self::ColPartialChunks => "partial chunks", + Self::ColBlocksToCatchup => "blocks that need to apply chunks", + Self::ColStateDlInfos => "blocks downloading", + Self::ColChallengedBlocks => "challenged blocks", + Self::ColStateHeaders => "state headers", + Self::ColInvalidChunks => "invalid chunks", + Self::ColBlockExtra => "extra block information", + Self::ColBlockPerHeight => "hash of block per height", + Self::ColStateParts => "state parts", + Self::ColEpochStart => "epoch start", + Self::ColAccountAnnouncements => "account announcements", + Self::ColNextBlockHashes => "next block hash", + Self::ColEpochLightClientBlocks => "epoch light client block", + Self::ColReceiptIdToShardId => "receipt id to shard id", + Self::_ColNextBlockWithNewChunk => "next block with new chunk (deprecated)", + Self::_ColLastBlockWithNewChunk => "last block with new chunk (deprecated)", + Self::ColPeerComponent => "peer components", + Self::ColComponentEdges => "component edges", + Self::ColLastComponentNonce => "last component nonce", + Self::ColTransactions => "transactions", + Self::ColChunkPerHeightShard => "hash of chunk per height and shard_id", + Self::ColStateChanges => "key value changes", + Self::ColBlockRefCount => "refcount per block", + Self::ColTrieChanges => "trie changes", + Self::ColBlockMerkleTree => "block merkle tree", + Self::ColChunkHashesByHeight => "chunk hashes indexed by height_created", + Self::ColBlockOrdinal => "block ordinal", + Self::ColGCCount => "gc count", + Self::ColOutcomeIds => "outcome ids", + Self::_ColTransactionRefCount => "refcount per transaction (deprecated)", + Self::ColProcessedBlockHeights => "processed block heights", + Self::ColReceipts => "receipts", + Self::ColCachedContractCode => "cached code", + Self::ColEpochValidatorInfo => "epoch validator info", + Self::ColHeaderHashesByHeight => "header hashes indexed by their height", + Self::ColStateChangesForSplitStates => { + "state changes indexed by block hash and shard id" + } + }; + write!(formatter, "{}", desc) + } +} + +impl DBCol { + pub fn is_rc(&self) -> bool { + IS_COL_RC[*self as usize] + } +} + +// List of columns for which GC should be
implemented +pub static SHOULD_COL_GC: [bool; NUM_COLS] = { + let mut col_gc = [true; NUM_COLS]; + col_gc[DBCol::ColDbVersion as usize] = false; // DB version is unrelated to GC + col_gc[DBCol::ColBlockMisc as usize] = false; + // TODO #3488 remove + col_gc[DBCol::ColBlockHeader as usize] = false; // header sync needs headers + col_gc[DBCol::ColGCCount as usize] = false; // GC count itself isn't GCed + col_gc[DBCol::ColBlockHeight as usize] = false; // block sync needs it + genesis should be accessible + col_gc[DBCol::ColPeers as usize] = false; // Peers is unrelated to GC + col_gc[DBCol::ColBlockMerkleTree as usize] = false; + col_gc[DBCol::ColAccountAnnouncements as usize] = false; + col_gc[DBCol::ColEpochLightClientBlocks as usize] = false; + col_gc[DBCol::ColPeerComponent as usize] = false; // Peer related info doesn't GC + col_gc[DBCol::ColLastComponentNonce as usize] = false; + col_gc[DBCol::ColComponentEdges as usize] = false; + col_gc[DBCol::ColBlockOrdinal as usize] = false; + col_gc[DBCol::ColEpochInfo as usize] = false; // https://github.com/nearprotocol/nearcore/pull/2952 + col_gc[DBCol::ColEpochValidatorInfo as usize] = false; // https://github.com/nearprotocol/nearcore/pull/2952 + col_gc[DBCol::ColEpochStart as usize] = false; // https://github.com/nearprotocol/nearcore/pull/2952 + col_gc[DBCol::ColCachedContractCode as usize] = false; + col_gc +}; + +// List of columns for which GC may not be executed even in a fully operational node + +pub static SKIP_COL_GC: [bool; NUM_COLS] = { + let mut col_gc = [false; NUM_COLS]; + // A node may have never restarted + col_gc[DBCol::ColStateHeaders as usize] = true; + // True until #2515 + col_gc[DBCol::ColStateParts as usize] = true; + col_gc +}; + +// List of reference counted columns + +pub static IS_COL_RC: [bool; NUM_COLS] = { + let mut col_rc = [false; NUM_COLS]; + col_rc[DBCol::ColState as usize] = true; + col_rc[DBCol::ColTransactions as usize] = true; + col_rc[DBCol::ColReceipts as usize] = true; + col_rc[DBCol::ColReceiptIdToShardId as usize] = true; + col_rc +}; + +pub const HEAD_KEY: &[u8; 4] = b"HEAD"; +pub const TAIL_KEY: &[u8; 4] = b"TAIL"; +pub const CHUNK_TAIL_KEY: &[u8; 10] = b"CHUNK_TAIL"; +pub const FORK_TAIL_KEY: &[u8; 9] = b"FORK_TAIL"; +pub const HEADER_HEAD_KEY: &[u8; 11] = b"HEADER_HEAD"; +pub const FINAL_HEAD_KEY: &[u8; 10] = b"FINAL_HEAD"; +pub const LATEST_KNOWN_KEY: &[u8; 12] = b"LATEST_KNOWN"; +pub const LARGEST_TARGET_HEIGHT_KEY: &[u8; 21] = b"LARGEST_TARGET_HEIGHT"; +pub const VERSION_KEY: &[u8; 7] = b"VERSION"; +pub const GENESIS_JSON_HASH_KEY: &[u8; 17] = b"GENESIS_JSON_HASH"; +pub const GENESIS_STATE_ROOTS_KEY: &[u8; 19] = b"GENESIS_STATE_ROOTS"; + +pub struct DBTransaction { + pub ops: Vec<DBOp>, +} + +pub enum DBOp { + Insert { col: DBCol, key: Vec<u8>, value: Vec<u8> }, + UpdateRefcount { col: DBCol, key: Vec<u8>, value: Vec<u8> }, + Delete { col: DBCol, key: Vec<u8> }, + DeleteAll { col: DBCol }, +} + +impl DBTransaction { + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, col: DBCol, key: K, value: V) { + self.ops.push(DBOp::Insert { + col, + key: key.as_ref().to_owned(), + value: value.as_ref().to_owned(), + }); + } + + pub fn update_refcount<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &mut self, + col: DBCol, + key: K, + value: V, + ) { + self.ops.push(DBOp::UpdateRefcount { + col, + key: key.as_ref().to_owned(), + value: value.as_ref().to_owned(), + }); + } + + pub fn delete<K: AsRef<[u8]>>(&mut self, col: DBCol, key: K) { + self.ops.push(DBOp::Delete { col, key: key.as_ref().to_owned() }); + } + + pub fn delete_all(&mut self, col: DBCol) { + self.ops.push(DBOp::DeleteAll {
col }); + } +} + +pub struct RocksDB { + db: DB, + cfs: Vec<*const ColumnFamily>, + + check_free_space_counter: std::sync::atomic::AtomicU16, + check_free_space_interval: u16, + free_space_threshold: bytesize::ByteSize, + + _pin: PhantomPinned, +} + +// DB was already Send+Sync. cf and read_options are const pointers using only functions in +// this file and safe to share across threads. +unsafe impl Send for RocksDB {} +unsafe impl Sync for RocksDB {} + +/// Options for configuring [`RocksDB`](RocksDB). +/// +/// ```rust +/// use skw_vm_store::db::RocksDBOptions; +/// +/// let rocksdb = RocksDBOptions::default() +/// .check_free_space_interval(256) +/// .free_disk_space_threshold(bytesize::ByteSize::mb(10)) +/// .read_only("/db/path"); +/// ``` +pub struct RocksDBOptions { + cf_names: Option<Vec<String>>, + cf_descriptors: Option<Vec<ColumnFamilyDescriptor>>, + + rocksdb_options: Option<Options>, + check_free_space_interval: u16, + free_space_threshold: bytesize::ByteSize, + warn_threshold: bytesize::ByteSize, +} + +/// Sets [`RocksDBOptions::check_free_space_interval`] to 256, +/// [`RocksDBOptions::free_disk_space_threshold`] to 16 MB and +/// [`RocksDBOptions::free_disk_space_warn_threshold`] to 256 MB. +impl Default for RocksDBOptions { + fn default() -> Self { + RocksDBOptions { + cf_names: None, + cf_descriptors: None, + rocksdb_options: None, + check_free_space_interval: 256, + free_space_threshold: bytesize::ByteSize::mb(16), + warn_threshold: bytesize::ByteSize::mb(256), + } + } +} + +impl RocksDBOptions { + /// Once the disk space is below the `free_disk_space_warn_threshold`, RocksDB will emit a warning message every [`interval`](RocksDBOptions::check_free_space_interval) writes. + pub fn free_disk_space_warn_threshold(mut self, warn_threshold: bytesize::ByteSize) -> Self { + self.warn_threshold = warn_threshold; + self + } + + pub fn cf_names(mut self, cf_names: Vec<String>) -> Self { + self.cf_names = Some(cf_names); + self + } + pub fn cf_descriptors(mut self, cf_descriptors: Vec<ColumnFamilyDescriptor>) -> Self { + self.cf_descriptors = Some(cf_descriptors); + self + } + + pub fn rocksdb_options(mut self, rocksdb_options: Options) -> Self { + self.rocksdb_options = Some(rocksdb_options); + self + } + + /// After n writes, the free disk space in the database's data directory is checked. + pub fn check_free_space_interval(mut self, interval: u16) -> Self { + self.check_free_space_interval = interval; + self + } + + /// Free space threshold. If the directory has fewer available bytes left, writing will not be + /// allowed to ensure recoverability. + pub fn free_disk_space_threshold(mut self, threshold: bytesize::ByteSize) -> Self { + self.free_space_threshold = threshold; + self + } + + /// Opens a read only database. + pub fn read_only<P: AsRef<Path>>(self, path: P) -> Result<RocksDB, DBError> { + let options = self.rocksdb_options.unwrap_or_default(); + let cf_names: Vec<_> = self.cf_names.unwrap_or_else(|| vec!["col0".to_string()]); + let db = DB::open_cf_for_read_only(&options, path, cf_names.iter(), false)?; + let cfs = + cf_names.iter().map(|n| db.cf_handle(n).unwrap() as *const ColumnFamily).collect(); + Ok(RocksDB { + db, + cfs, + _pin: PhantomPinned, + check_free_space_interval: self.check_free_space_interval, + check_free_space_counter: std::sync::atomic::AtomicU16::new(0), + free_space_threshold: self.free_space_threshold, + }) + } + + /// Opens the database in read/write mode.
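+ /// Missing column families are created on open: the default options from + /// `rocksdb_options` (defined below) set `create_missing_column_families`.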
+ pub fn read_write<P: AsRef<Path>>(self, path: P) -> Result<RocksDB, DBError> { + use strum::IntoEnumIterator; + let options = self.rocksdb_options.unwrap_or_else(rocksdb_options); + let cf_names = self + .cf_names + .unwrap_or_else(|| DBCol::iter().map(|col| format!("col{}", col as usize)).collect()); + let cf_descriptors = self.cf_descriptors.unwrap_or_else(|| { + DBCol::iter() + .map(|col| { + ColumnFamilyDescriptor::new( + format!("col{}", col as usize), + rocksdb_column_options(col), + ) + }) + .collect() + }); + let db = DB::open_cf_descriptors(&options, path, cf_descriptors)?; + #[cfg(feature = "single_thread_rocksdb")] + { + // These have to be set after the DB is opened + let mut env = Env::default().unwrap(); + env.set_bottom_priority_background_threads(0); + env.set_high_priority_background_threads(0); + env.set_low_priority_background_threads(0); + env.set_background_threads(0); + println!("Disabled all background threads in rocksdb"); + } + let cfs = + cf_names.iter().map(|n| db.cf_handle(n).unwrap() as *const ColumnFamily).collect(); + Ok(RocksDB { + db, + cfs, + _pin: PhantomPinned, + check_free_space_interval: self.check_free_space_interval, + check_free_space_counter: std::sync::atomic::AtomicU16::new(0), + free_space_threshold: self.free_space_threshold, + }) + } +} + +pub struct TestDB { + db: RwLock<Vec<hashbrown::HashMap<Vec<u8>, Vec<u8>>>>, +} + +pub trait Database: Sync + Send { + fn transaction(&self) -> DBTransaction { + DBTransaction { ops: Vec::new() } + } + fn get(&self, col: DBCol, key: &[u8]) -> Result<Option<Vec<u8>>, DBError>; + fn iter<'a>(&'a self, column: DBCol) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a>; + fn iter_without_rc_logic<'a>( + &'a self, + column: DBCol, + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a>; + fn iter_prefix<'a>( + &'a self, + col: DBCol, + key_prefix: &'a [u8], + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a>; + fn write(&self, batch: DBTransaction) -> Result<(), DBError>; + fn as_rocksdb(&self) -> Option<&RocksDB> { + None + } +} + +impl Database for RocksDB { + fn get(&self, col: DBCol, key: &[u8]) -> Result<Option<Vec<u8>>, DBError> { + let read_options = rocksdb_read_options(); + let result = self.db.get_cf_opt(unsafe { &*self.cfs[col as usize] }, key, &read_options)?; + Ok(RocksDB::get_with_rc_logic(col, result)) + } + + fn iter_without_rc_logic<'a>( + &'a self, + col: DBCol, + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + let read_options = rocksdb_read_options(); + unsafe { + let cf_handle = &*self.cfs[col as usize]; + let iterator = self.db.iterator_cf_opt(cf_handle, read_options, IteratorMode::Start); + Box::new(iterator) + } + } + + fn iter<'a>(&'a self, col: DBCol) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + let read_options = rocksdb_read_options(); + unsafe { + let cf_handle = &*self.cfs[col as usize]; + let iterator = self.db.iterator_cf_opt(cf_handle, read_options, IteratorMode::Start); + RocksDB::iter_with_rc_logic(col, iterator) + } + } + + fn iter_prefix<'a>( + &'a self, + col: DBCol, + key_prefix: &'a [u8], + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + // NOTE: There is no Clone implementation for ReadOptions, so we cannot really reuse + // `self.read_options` here. + let mut read_options = rocksdb_read_options(); + read_options.set_prefix_same_as_start(true); + unsafe { + let cf_handle = &*self.cfs[col as usize]; + // This implementation is copied from RocksDB implementation of `prefix_iterator_cf` since + // there is no `prefix_iterator_cf_opt` method.
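+ // Since these column families configure no prefix extractor, it is the `take_while` + // below, rather than `set_prefix_same_as_start`, that actually bounds the scan.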
+ let iterator = self + .db + .iterator_cf_opt( + cf_handle, + read_options, + IteratorMode::From(key_prefix, Direction::Forward), + ) + .take_while(move |(key, _value)| key.starts_with(key_prefix)); + RocksDB::iter_with_rc_logic(col, iterator) + } + } + + fn write(&self, transaction: DBTransaction) -> Result<(), DBError> { + if let Err(check) = self.pre_write_check() { + if check.is_io() { + warn!("unable to verify remaining disk space: {:?}, continuing write without verifying (this may result in unrecoverable data loss if disk space is exceeded)", check) + } else { + panic!("{:?}", check) + } + } + + let mut batch = WriteBatch::default(); + for op in transaction.ops { + match op { + DBOp::Insert { col, key, value } => unsafe { + batch.put_cf(&*self.cfs[col as usize], key, value); + }, + DBOp::UpdateRefcount { col, key, value } => unsafe { + assert!(col.is_rc()); + batch.merge_cf(&*self.cfs[col as usize], key, value); + }, + DBOp::Delete { col, key } => unsafe { + batch.delete_cf(&*self.cfs[col as usize], key); + }, + DBOp::DeleteAll { col } => { + let cf_handle = unsafe { &*self.cfs[col as usize] }; + let opt_first = self.db.iterator_cf(cf_handle, IteratorMode::Start).next(); + let opt_last = self.db.iterator_cf(cf_handle, IteratorMode::End).next(); + assert_eq!(opt_first.is_some(), opt_last.is_some()); + if let (Some((min_key, _)), Some((max_key, _))) = (opt_first, opt_last) { + batch.delete_range_cf(cf_handle, &min_key, &max_key); + // delete_range_cf deletes ["begin_key", "end_key"), so need one more delete + batch.delete_cf(cf_handle, max_key) + } + } + } + } + Ok(self.db.write(batch)?) + } + + fn as_rocksdb(&self) -> Option<&RocksDB> { + Some(self) + } +} + +impl Database for TestDB { + fn get(&self, col: DBCol, key: &[u8]) -> Result<Option<Vec<u8>>, DBError> { + let result = self.db.read().unwrap()[col as usize].get(key).cloned(); + Ok(RocksDB::get_with_rc_logic(col, result)) + } + + fn iter<'a>(&'a self, col: DBCol) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + let iterator = self.iter_without_rc_logic(col); + RocksDB::iter_with_rc_logic(col, iterator) + } + + fn iter_without_rc_logic<'a>( + &'a self, + col: DBCol, + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + let iterator = self.db.read().unwrap()[col as usize] + .clone() + .into_iter() + .map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())); + Box::new(iterator) + } + + fn iter_prefix<'a>( + &'a self, + col: DBCol, + key_prefix: &'a [u8], + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + RocksDB::iter_with_rc_logic( + col, + self.iter(col).filter(move |(key, _value)| key.starts_with(key_prefix)), + ) + } + + fn write(&self, transaction: DBTransaction) -> Result<(), DBError> { + let mut db = self.db.write().unwrap(); + for op in transaction.ops { + match op { + DBOp::Insert { col, key, value } => { + db[col as usize].insert(key, value); + } + DBOp::UpdateRefcount { col, key, value } => { + let mut val = db[col as usize].get(&key).cloned().unwrap_or_default(); + merge_refcounted_records(&mut val, &value); + if !val.is_empty() { + db[col as usize].insert(key, val); + } else { + db[col as usize].remove(&key); + } + } + DBOp::Delete { col, key } => { + db[col as usize].remove(&key); + } + DBOp::DeleteAll { col } => db[col as usize].clear(), + }; + } + Ok(()) + } +} + +/// DB level options +fn rocksdb_options() -> Options { + let mut opts = Options::default(); + + opts.create_missing_column_families(true); + opts.create_if_missing(true); + opts.set_use_fsync(false); + opts.set_max_open_files(512); + opts.set_keep_log_file_num(1); + opts.set_bytes_per_sync(bytesize::MIB); +
opts.set_write_buffer_size(256 * bytesize::MIB as usize); + opts.set_max_bytes_for_level_base(256 * bytesize::MIB); + #[cfg(not(feature = "single_thread_rocksdb"))] + { + opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); + opts.set_max_total_wal_size(bytesize::GIB); + } + #[cfg(feature = "single_thread_rocksdb")] + { + opts.set_disable_auto_compactions(true); + opts.set_max_background_jobs(0); + opts.set_stats_dump_period_sec(0); + opts.set_stats_persist_period_sec(0); + opts.set_level_zero_slowdown_writes_trigger(-1); + opts.set_level_zero_file_num_compaction_trigger(-1); + opts.set_level_zero_stop_writes_trigger(100000000); + } + + opts +} + +fn rocksdb_read_options() -> ReadOptions { + let mut read_options = ReadOptions::default(); + read_options.set_verify_checksums(false); + read_options +} + +fn rocksdb_block_based_options(cache_size: usize) -> BlockBasedOptions { + let mut block_opts = BlockBasedOptions::default(); + block_opts.set_block_size(16 * bytesize::KIB as usize); + // We create a block cache for each column; at the default 32 MiB per column and + // NUM_COLS == 50 that is 32 * 50 = 1600 MiB in total (ColState gets a larger 512 MiB cache). + block_opts.set_block_cache(&Cache::new_lru_cache(cache_size).unwrap()); + block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + block_opts.set_cache_index_and_filter_blocks(true); + block_opts.set_bloom_filter(10, true); + block_opts +} + +// TODO(#5213) Use ByteSize package to represent sizes. +fn choose_cache_size(col: DBCol) -> usize { + match col { + DBCol::ColState => 512 * 1024 * 1024, + _ => 32 * 1024 * 1024, + } +} + +fn rocksdb_column_options(col: DBCol) -> Options { + let mut opts = Options::default(); + opts.set_level_compaction_dynamic_level_bytes(true); + let cache_size = choose_cache_size(col); + opts.set_block_based_table_factory(&rocksdb_block_based_options(cache_size)); + opts.optimize_level_style_compaction(128 * bytesize::MIB as usize); + opts.set_target_file_size_base(64 * bytesize::MIB); + opts.set_compression_per_level(&[]); + if col.is_rc() { + opts.set_merge_operator("refcount merge", RocksDB::refcount_merge, RocksDB::refcount_merge); + opts.set_compaction_filter("empty value filter", RocksDB::empty_value_compaction_filter); + } + opts +} + +impl RocksDB { + /// Returns version of the database state on disk. + pub fn get_version<P: AsRef<Path>>(path: P) -> Result<DbVersion, DBError> { + let db = RocksDB::new_read_only(path)?; + db.get(DBCol::ColDbVersion, VERSION_KEY).map(|result| { + serde_json::from_slice( + &result + .expect("Failed to find version in first column. Database must be corrupted."), + ) + .expect("Failed to parse version. Database must be corrupted.") + }) + } + + fn new_read_only<P: AsRef<Path>>(path: P) -> Result<RocksDB, DBError> { + RocksDBOptions::default().read_only(path) + } + + pub fn new<P: AsRef<Path>>(path: P) -> Result<RocksDB, DBError> { + RocksDBOptions::default().read_write(path) + } + + /// Checks if there is enough disk space left to perform a write. Running out of disk space can + /// leave the database in a difficult-to-recover state, thus a PreWriteCheckErr is pretty much + /// unrecoverable in most cases.
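+ /// The filesystem is only actually queried roughly once every `check_free_space_interval` + /// writes; the calls in between just bump an atomic counter and return `Ok(())`.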
+ fn pre_write_check(&self) -> Result<(), PreWriteCheckErr> { + let counter = self.check_free_space_counter.fetch_add(1, Ordering::Relaxed); + if self.check_free_space_interval >= counter { + return Ok(()); + } + self.check_free_space_counter.swap(0, Ordering::Relaxed); + + let available = available_space(self.db.path())?; + + if available < 16_u64 * self.free_space_threshold { + warn!("remaining disk space is running low ({} left)", available); + } + + if available < self.free_space_threshold { + Err(PreWriteCheckErr::LowDiskSpace(available)) + } else { + Ok(()) + } + } +} + +fn available_space<P: AsRef<Path> + std::fmt::Debug>( + path: P, +) -> std::io::Result<bytesize::ByteSize> { + let available = fs2::available_space(path)?; + Ok(bytesize::ByteSize::b(available)) +} + +#[derive(Debug, thiserror::Error)] +pub enum PreWriteCheckErr { + #[error("error checking filesystem: {0}")] + IO(#[from] std::io::Error), + #[error("low disk space ({0} available)")] + LowDiskSpace(bytesize::ByteSize), +} + +impl PreWriteCheckErr { + pub fn is_io(&self) -> bool { + matches!(self, PreWriteCheckErr::IO(_)) + } + + pub fn is_low_disk_space(&self) -> bool { + matches!(self, PreWriteCheckErr::LowDiskSpace(_)) + } +} + +impl Drop for RocksDB { + fn drop(&mut self) { + if cfg!(feature = "single_thread_rocksdb") { + // With only one thread, RocksDB can get stuck waiting on a condition variable; + // turn on additional threads so shutdown can proceed. + let mut env = Env::default().unwrap(); + env.set_background_threads(4); + } + self.db.cancel_all_background_work(true); + } +} + +impl TestDB { + pub fn new() -> Self { + let db: Vec<_> = (0..NUM_COLS).map(|_| hashbrown::HashMap::new()).collect(); + Self { db: RwLock::new(db) } + } +} + +#[cfg(test)] +mod tests { + use crate::db::DBCol::ColState; + use crate::db::{rocksdb_read_options, DBError, Database, RocksDB}; + use crate::{create_store, DBCol}; + + impl RocksDB { + #[cfg(not(feature = "single_thread_rocksdb"))] + fn compact(&self, col: DBCol) { + self.db.compact_range_cf( + unsafe { &*self.cfs[col as usize] }, + Option::<&[u8]>::None, + Option::<&[u8]>::None, + ); + } + + fn get_no_empty_filtering( + &self, + col: DBCol, + key: &[u8], + ) -> Result<Option<Vec<u8>>, DBError> { + let read_options = rocksdb_read_options(); + let result = + self.db.get_cf_opt(unsafe { &*self.cfs[col as usize] }, key, &read_options)?; + Ok(result) + } + } + + #[test] + fn test_prewrite_check() { + let tmp_dir = tempfile::Builder::new().prefix("_test_prewrite_check").tempdir().unwrap(); + let store = RocksDB::new(tmp_dir).unwrap(); + store.pre_write_check().unwrap() + } + + #[test] + fn test_clear_column() { + let tmp_dir = tempfile::Builder::new().prefix("_test_clear_column").tempdir().unwrap(); + let store = create_store(tmp_dir.path()); + assert_eq!(store.get(ColState, &[1]).unwrap(), None); + { + let mut store_update = store.store_update(); + store_update.update_refcount(ColState, &[1], &[1], 1); + store_update.update_refcount(ColState, &[2], &[2], 1); + store_update.update_refcount(ColState, &[3], &[3], 1); + store_update.commit().unwrap(); + } + assert_eq!(store.get(ColState, &[1]).unwrap(), Some(vec![1])); + { + let mut store_update = store.store_update(); + store_update.delete_all(ColState); + store_update.commit().unwrap(); + } + assert_eq!(store.get(ColState, &[1]).unwrap(), None); + } + + #[test] + fn rocksdb_merge_sanity() { + let tmp_dir = tempfile::Builder::new().prefix("_test_snapshot_sanity").tempdir().unwrap(); + let store = create_store(tmp_dir.path()); + let ptr = (&*store.storage) as *const (dyn Database + 'static); + let rocksdb =
unsafe { &*(ptr as *const RocksDB) }; + assert_eq!(store.get(ColState, &[1]).unwrap(), None); + { + let mut store_update = store.store_update(); + store_update.update_refcount(ColState, &[1], &[1], 1); + store_update.commit().unwrap(); + } + { + let mut store_update = store.store_update(); + store_update.update_refcount(ColState, &[1], &[1], 1); + store_update.commit().unwrap(); + } + assert_eq!(store.get(ColState, &[1]).unwrap(), Some(vec![1])); + assert_eq!( + rocksdb.get_no_empty_filtering(ColState, &[1]).unwrap(), + Some(vec![1, 2, 0, 0, 0, 0, 0, 0, 0]) + ); + { + let mut store_update = store.store_update(); + store_update.update_refcount(ColState, &[1], &[1], -1); + store_update.commit().unwrap(); + } + assert_eq!(store.get(ColState, &[1]).unwrap(), Some(vec![1])); + assert_eq!( + rocksdb.get_no_empty_filtering(ColState, &[1]).unwrap(), + Some(vec![1, 1, 0, 0, 0, 0, 0, 0, 0]) + ); + { + let mut store_update = store.store_update(); + store_update.update_refcount(ColState, &[1], &[1], -1); + store_update.commit().unwrap(); + } + // Refcount goes to 0 -> get() returns None + assert_eq!(store.get(ColState, &[1]).unwrap(), None); + // Internally there is an empty value + assert_eq!(rocksdb.get_no_empty_filtering(ColState, &[1]).unwrap(), Some(vec![])); + + #[cfg(not(feature = "single_thread_rocksdb"))] + { + // single_thread_rocksdb makes compact hang forever + rocksdb.compact(ColState); + rocksdb.compact(ColState); + + // After compaction the empty value disappears + assert_eq!(rocksdb.get_no_empty_filtering(ColState, &[1]).unwrap(), None); + assert_eq!(store.get(ColState, &[1]).unwrap(), None); + } + } +} diff --git a/mock-enclave/src/skw-vm-store/src/db/refcount.rs b/mock-enclave/src/skw-vm-store/src/db/refcount.rs new file mode 100644 index 0000000..f0320e9 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/db/refcount.rs @@ -0,0 +1,138 @@ +use std::cmp::Ordering; +use std::io::{Cursor, Write}; + +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use rocksdb::compaction_filter::Decision; +use rocksdb::MergeOperands; + +use crate::db::RocksDB; +use crate::DBCol; + +/// Refcounted columns store values with a refcount (rc). +/// +/// Writes append refcount records; reads merge them. +/// The merged rc should always be positive; if it's not, there is a bug in GC.
+/// +/// Refcount record format: +/// rc = 0 => empty +/// rc < 0 => 8 bytes little endian rc +/// rc > 0 => value followed by 8 bytes little endian rc +/// +pub(crate) fn merge_refcounted_records(result: &mut Vec<u8>, val: &[u8]) { + let (bytes, add_rc) = decode_value_with_rc(val); + if add_rc == 0 { + return; + } + let (result_bytes, result_rc) = decode_value_with_rc(result); + if result_rc == 0 { + result.extend_from_slice(val); + } else { + let rc = result_rc + add_rc; + debug_assert!(result_rc <= 0 || add_rc <= 0 || result_bytes == bytes); + match rc.cmp(&0) { + Ordering::Less => { + result.clear(); + result.extend_from_slice(&rc.to_le_bytes()); + } + Ordering::Equal => { + result.clear(); + } + Ordering::Greater => { + if result_rc < 0 { + result.clear(); + result.extend_from_slice(val); + } + let len = result.len(); + result[len - 8..].copy_from_slice(&rc.to_le_bytes()); + } + } + } +} + +/// Returns +/// (Some(value), rc) if rc > 0 +/// (None, rc) if rc <= 0 +pub fn decode_value_with_rc(bytes: &[u8]) -> (Option<&[u8]>, i64) { + if bytes.len() < 8 { + debug_assert!(bytes.is_empty()); + return (None, 0); + } + let mut cursor = Cursor::new(&bytes[bytes.len() - 8..]); + let rc = cursor.read_i64::<LittleEndian>().unwrap(); + if rc < 0 { + (None, rc) + } else { + (Some(&bytes[..bytes.len() - 8]), rc) + } +} + +pub(crate) fn encode_value_with_rc(data: &[u8], rc: i64) -> Vec<u8> { + if rc == 0 { + return vec![]; + } + let mut cursor = Cursor::new(Vec::with_capacity(data.len() + 8)); + if rc > 0 { + cursor.write_all(data).unwrap(); + } + cursor.write_i64::<LittleEndian>(rc).unwrap(); + cursor.into_inner() +} + +impl RocksDB { + /// ColState has refcounted values. + /// Merge adds refcounts, zero refcount becomes empty value. + /// Empty values get filtered by get methods, and removed by compaction.
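+ /// For illustration, the record layout above in concrete bytes (a hand-computed sketch, + /// using the `decode_value_with_rc` helper defined earlier in this file): + /// ```ignore + /// let mut rec = b"val".to_vec(); + /// rec.extend_from_slice(&2i64.to_le_bytes()); // rc > 0: value then 8-byte LE rc + /// assert_eq!(decode_value_with_rc(&rec), (Some(&b"val"[..]), 2)); + /// let neg = (-1i64).to_le_bytes().to_vec(); // rc < 0: 8-byte LE rc only + /// assert_eq!(decode_value_with_rc(&neg), (None, -1)); + /// assert_eq!(decode_value_with_rc(&[]), (None, 0)); // rc == 0: empty record + /// ```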
+ pub(crate) fn refcount_merge( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &mut MergeOperands, + ) -> Option<Vec<u8>> { + let mut result = vec![]; + if let Some(val) = existing_val { + merge_refcounted_records(&mut result, val); + } + for val in operands { + merge_refcounted_records(&mut result, val); + } + Some(result) + } + + /// Compaction filter for ColState + pub(crate) fn empty_value_compaction_filter( + _level: u32, + _key: &[u8], + value: &[u8], + ) -> Decision { + if value.is_empty() { + Decision::Remove + } else { + Decision::Keep + } + } + + /// Treats empty value as no value and strips refcount + pub(crate) fn get_with_rc_logic(column: DBCol, value: Option<Vec<u8>>) -> Option<Vec<u8>> { + if column.is_rc() { + value.and_then(|vec| decode_value_with_rc(&vec).0.map(|v| v.to_vec())) + } else { + value + } + } + + /// Iterator treats empty value as no value and strips refcount + pub(crate) fn iter_with_rc_logic<'a, I>( + column: DBCol, + iterator: I, + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> + where + I: Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a, + { + if column.is_rc() { + Box::new(iterator.filter_map(|(k, v_rc)| { + decode_value_with_rc(&v_rc).0.map(|v| (k, v.to_vec().into_boxed_slice())) + })) + } else { + Box::new(iterator) + } + } +} diff --git a/mock-enclave/src/skw-vm-store/src/db/v6_to_v7.rs b/mock-enclave/src/skw-vm-store/src/db/v6_to_v7.rs new file mode 100644 index 0000000..1b169e9 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/db/v6_to_v7.rs @@ -0,0 +1,75 @@ +use std::io::Cursor; + +use byteorder::{LittleEndian, ReadBytesExt}; +use rocksdb::{ColumnFamilyDescriptor, MergeOperands, Options}; +use strum::IntoEnumIterator; + +use crate::db::{rocksdb_column_options, DBError, RocksDB, RocksDBOptions}; +use crate::DBCol; + +fn refcount_merge_v6( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &mut MergeOperands, +) -> Option<Vec<u8>> { + let mut result = vec![]; + if let Some(val) = existing_val { + merge_refcounted_records_v6(&mut result, val); + } + for val in operands { + merge_refcounted_records_v6(&mut result, val); + } + Some(result) +} + +fn vec_to_rc(bytes: &[u8]) -> i32 { + let mut cursor = Cursor::new(&bytes[bytes.len() - 4..]); + cursor.read_i32::<LittleEndian>().unwrap() +} + +fn merge_refcounted_records_v6(result: &mut Vec<u8>, val: &[u8]) { + if val.is_empty() { + return; + } + let add_rc = vec_to_rc(val); + if !result.is_empty() { + let result_rc = vec_to_rc(result) + add_rc; + + debug_assert_eq!(result[0..(result.len() - 4)], val[0..(val.len() - 4)]); + let len = result.len(); + result[(len - 4)..].copy_from_slice(&result_rc.to_le_bytes()); + if result_rc == 0 { + *result = vec![]; + } + } else { + *result = val.to_vec(); + } +} + +fn rocksdb_column_options_v6(col: DBCol) -> Options { + let mut opts = rocksdb_column_options(DBCol::ColDbVersion); + + if col == DBCol::ColState { + opts.set_merge_operator("refcount merge", refcount_merge_v6, refcount_merge_v6); + opts.set_compaction_filter("empty value filter", RocksDB::empty_value_compaction_filter); + } + opts +} + +impl RocksDB { + pub(crate) fn new_v6<P: AsRef<std::path::Path>>(path: P) -> Result<RocksDB, DBError> { + RocksDBOptions::default() + .cf_names(DBCol::iter().map(|col| format!("col{}", col as usize)).collect()) + .cf_descriptors( + DBCol::iter() + .map(|col| { + ColumnFamilyDescriptor::new( + format!("col{}", col as usize), + rocksdb_column_options_v6(col), + ) + }) + .collect(), + ) + .read_write(path) + } +} diff --git a/mock-enclave/src/skw-vm-store/src/errors.rs b/mock-enclave/src/skw-vm-store/src/errors.rs deleted file mode 100644 index e69de29..0000000 diff --git
a/mock-enclave/src/skw-vm-store/src/lib.rs b/mock-enclave/src/skw-vm-store/src/lib.rs index 0f2f761..f29a702 100644 --- a/mock-enclave/src/skw-vm-store/src/lib.rs +++ b/mock-enclave/src/skw-vm-store/src/lib.rs @@ -1,64 +1,533 @@ -use hashbrown::HashMap; -use skw_vm_primitives::db_key::DBKey; -use skw_vm_primitives::{ - contract_runtime::ContractCode, - account_id: AccountId, +use std::fs::File; +use std::io::{BufReader, Read, Write}; +use std::ops::Deref; +use std::path::Path; +use std::pin::Pin; +use std::sync::Arc; +use std::{fmt, io}; + +use borsh::{BorshDeserialize, BorshSerialize}; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use lru::LruCache; + +pub use db::DBCol::{self, *}; +pub use db::{ + CHUNK_TAIL_KEY, FINAL_HEAD_KEY, FORK_TAIL_KEY, HEADER_HEAD_KEY, HEAD_KEY, + LARGEST_TARGET_HEIGHT_KEY, LATEST_KNOWN_KEY, NUM_COLS, SHOULD_COL_GC, SKIP_COL_GC, TAIL_KEY, }; -use serde::{Serialize, Deserialize}; +use near_crypto::PublicKey; +use near_primitives::account::{AccessKey, Account}; +use near_primitives::contract::ContractCode; +pub use near_primitives::errors::StorageError; +use near_primitives::hash::CryptoHash; +use near_primitives::receipt::{DelayedReceiptIndices, Receipt, ReceivedData}; +use near_primitives::serialize::to_base; +pub use near_primitives::shard_layout::ShardUId; +use near_primitives::trie_key::{trie_key_parsers, TrieKey}; +use near_primitives::types::{AccountId, CompiledContractCache, StateRoot}; -#[derive(Debug, Serialize, Deserialize)] -pub struct Store(HashMap<DBKey, Vec<u8>>); +pub use crate::db::refcount::decode_value_with_rc; +use crate::db::refcount::encode_value_with_rc; +use crate::db::{ + DBOp, DBTransaction, Database, TestDB, RocksDB, GENESIS_JSON_HASH_KEY, GENESIS_STATE_ROOTS_KEY, +}; +pub use crate::trie::{ + iterator::TrieIterator, split_state, update::TrieUpdate, update::TrieUpdateIterator, + update::TrieUpdateValuePtr, ApplyStatePartResult, KeyForStateChanges, PartialStorage, + ShardTries, Trie, TrieChanges, WrappedTrieChanges, +}; + +pub mod db; +pub mod migrations; +pub mod test_utils; +mod trie; + +#[derive(Clone)] +pub struct Store { + storage: Pin<Arc<dyn Database>>, +} impl Store { - pub fn get(&self, key: &DBKey) -> Option<Vec<u8>> { - self.0.get(key) + pub fn new(storage: Pin<Arc<dyn Database>>) -> Store { + Store { storage } + } + + pub fn get(&self, column: DBCol, key: &[u8]) -> Result<Option<Vec<u8>>, io::Error> { + self.storage.get(column, key).map_err(|e| e.into()) + } + + pub fn get_ser<T: BorshDeserialize>( + &self, + column: DBCol, + key: &[u8], + ) -> Result<Option<T>, io::Error> { + match self.storage.get(column, key) { + Ok(Some(bytes)) => match T::try_from_slice(bytes.as_ref()) { + Ok(result) => Ok(Some(result)), + Err(e) => Err(e), + }, + Ok(None) => Ok(None), + Err(e) => Err(e.into()), + } + } + + pub fn exists(&self, column: DBCol, key: &[u8]) -> Result<bool, io::Error> { + self.storage.get(column, key).map(|value| value.is_some()).map_err(|e| e.into()) + } + + pub fn store_update(&self) -> StoreUpdate { + StoreUpdate::new(self.storage.clone()) + } + + pub fn iter<'a>( + &'a self, + column: DBCol, + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + self.storage.iter(column) + } + + pub fn iter_without_rc_logic<'a>( + &'a self, + column: DBCol, + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + self.storage.iter_without_rc_logic(column) + } + + pub fn iter_prefix<'a>( + &'a self, + column: DBCol, + key_prefix: &'a [u8], + ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> { + self.storage.iter_prefix(column, key_prefix) + } + + pub fn iter_prefix_ser<'a, T: BorshDeserialize>( + &'a self, + column: DBCol, + key_prefix: &'a [u8], + ) -> Box<dyn Iterator<Item = Result<(Vec<u8>, T), io::Error>> + 'a> { + Box::new( + self.storage
+                .iter_prefix(column, key_prefix)
+                .map(|(key, value)| Ok((key.to_vec(), T::try_from_slice(value.as_ref())?))),
+        )
+    }
+
+    pub fn save_to_file(&self, column: DBCol, filename: &Path) -> Result<(), std::io::Error> {
+        let mut file = File::create(filename)?;
+        for (key, value) in self.storage.iter_without_rc_logic(column) {
+            file.write_u32::<LittleEndian>(key.len() as u32)?;
+            file.write_all(&key)?;
+            file.write_u32::<LittleEndian>(value.len() as u32)?;
+            file.write_all(&value)?;
+        }
+        Ok(())
+    }
+
+    pub fn load_from_file(&self, column: DBCol, filename: &Path) -> Result<(), std::io::Error> {
+        let file = File::open(filename)?;
+        let mut file = BufReader::new(file);
+        let mut transaction = self.storage.transaction();
+        let mut key = Vec::new();
+        let mut value = Vec::new();
+        loop {
+            let key_len = match file.read_u32::<LittleEndian>() {
+                Ok(key_len) => key_len as usize,
+                Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
+                Err(err) => return Err(err),
+            };
+            key.resize(key_len, 0);
+            file.read_exact(&mut key)?;
+
+            let value_len = file.read_u32::<LittleEndian>()? as usize;
+            value.resize(value_len, 0);
+            file.read_exact(&mut value)?;
+
+            transaction.put(column, &key, &value);
+        }
+        self.storage.write(transaction).map_err(|e| e.into())
+    }
+
+    pub fn get_rocksdb(&self) -> Option<&RocksDB> {
+        self.storage.as_rocksdb()
+    }
+}
+
+/// Keeps track of current changes to the database and can commit all of them to the database.
+pub struct StoreUpdate {
+    storage: Pin<Arc<dyn Database>>,
+    transaction: DBTransaction,
+    /// Optionally has reference to the trie to clear cache on the commit.
+    tries: Option<ShardTries>,
+}
+
+impl StoreUpdate {
+    pub fn new(storage: Pin<Arc<dyn Database>>) -> Self {
+        let transaction = storage.transaction();
+        StoreUpdate { storage, transaction, tries: None }
+    }
+
+    pub fn new_with_tries(tries: ShardTries) -> Self {
+        let storage = tries.get_store().storage.clone();
+        let transaction = storage.transaction();
+        StoreUpdate { storage, transaction, tries: Some(tries) }
+    }
+
+    pub fn update_refcount(&mut self, column: DBCol, key: &[u8], value: &[u8], rc_delta: i64) {
+        debug_assert!(column.is_rc());
+        let value = encode_value_with_rc(value, rc_delta);
+        self.transaction.update_refcount(column, key, value)
+    }
+
+    pub fn set(&mut self, column: DBCol, key: &[u8], value: &[u8]) {
+        self.transaction.put(column, key, value)
+    }
+
+    pub fn set_ser<T: BorshSerialize>(
+        &mut self,
+        column: DBCol,
+        key: &[u8],
+        value: &T,
+    ) -> Result<(), io::Error> {
+        debug_assert!(!column.is_rc());
+        let data = value.try_to_vec()?;
+        self.set(column, key, &data);
+        Ok(())
+    }
+
+    pub fn delete(&mut self, column: DBCol, key: &[u8]) {
+        self.transaction.delete(column, key);
+    }
+
+    pub fn delete_all(&mut self, column: DBCol) {
+        self.transaction.delete_all(column);
+    }
+
+    /// Merge another store update into this one.
+    pub fn merge(&mut self, other: StoreUpdate) {
+        if let Some(tries) = other.tries {
+            if self.tries.is_none() {
+                self.tries = Some(tries);
+            } else {
+                debug_assert!(self.tries.as_ref().unwrap().is_same(&tries));
+            }
+        }
+
+        self.merge_transaction(other.transaction);
     }
 
-    pub fn remove(&mut self, key: &DBKey) -> bool {
-        self.0.remove(key).is_some()
+    /// Merge DB Transaction.
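+    /// Ops from `transaction` are replayed onto this update's own transaction
+    /// in order. A minimal usage sketch (hypothetical `store`; note that
+    /// `commit` debug-asserts that no non-refcount key is written twice):
+    ///
+    /// ```ignore
+    /// let mut update = store.store_update();
+    /// update.set(DBCol::ColBlockMisc, b"k1", b"v1");
+    /// let mut other = store.store_update();
+    /// other.set(DBCol::ColBlockMisc, b"k2", b"v2");
+    /// update.merge(other); // internally calls merge_transaction
+    /// update.commit()?;    // k1 and k2 are written in a single batch
+    /// ```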
+    pub fn merge_transaction(&mut self, transaction: DBTransaction) {
+        for op in transaction.ops {
+            match op {
+                DBOp::Insert { col, key, value } => self.transaction.put(col, &key, &value),
+                DBOp::Delete { col, key } => self.transaction.delete(col, &key),
+                DBOp::UpdateRefcount { col, key, value } => {
+                    self.transaction.update_refcount(col, &key, &value)
+                }
+                DBOp::DeleteAll { col } => self.transaction.delete_all(col),
+            }
+        }
+    }
 
-    /// will replace content if occupied
-    pub fn force_set(&mut self, key: &DBKey, content: Vec<u8>) -> bool {
-        self.0.insert(key, content);
-        true
+    pub fn commit(self) -> Result<(), io::Error> {
+        debug_assert!(
+            {
+                let non_refcount_keys = self
+                    .transaction
+                    .ops
+                    .iter()
+                    .filter_map(|op| match op {
+                        DBOp::Insert { col, key, .. } => Some((*col as u8, key)),
+                        DBOp::Delete { col, key } => Some((*col as u8, key)),
+                        DBOp::UpdateRefcount { .. } => None,
+                        DBOp::DeleteAll { .. } => None,
+                    })
+                    .collect::<Vec<_>>();
+                non_refcount_keys.len()
+                    == non_refcount_keys.iter().collect::<std::collections::HashSet<_>>().len()
+            },
+            "Transaction overwrites itself: {:?}",
+            self
+        );
+        if let Some(tries) = self.tries {
+            assert_eq!(
+                tries.get_store().storage.deref() as *const _,
+                self.storage.deref() as *const _
+            );
+            tries.update_cache(&self.transaction)?;
+        }
+        self.storage.write(self.transaction).map_err(|e| e.into())
     }
+}
 
-    /// may fail when the key is occupied
-    pub fn try_set(&mut self, key: &DBKey, content: Vec<u8>) -> bool {
-        self.0.try_insert(key, content).is_ok()
+impl fmt::Debug for StoreUpdate {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        writeln!(f, "Store Update {{")?;
+        for op in self.transaction.ops.iter() {
+            match op {
+                DBOp::Insert { col, key, .. } => writeln!(f, "  + {:?} {}", col, to_base(key))?,
+                DBOp::UpdateRefcount { col, key, .. } => {
+                    writeln!(f, "  +- {:?} {}", col, to_base(key))?
+                }
+                DBOp::Delete { col, key } => writeln!(f, "  - {:?} {}", col, to_base(key))?,
+                DBOp::DeleteAll { col } => writeln!(f, "  delete all {:?}", col)?,
+            }
+        }
+        writeln!(f, "}}")
     }
 }
 
-pub fn get_code(store: &Store, account_id: AccountId) -> Option<ContractCode> {
-    match store.get(&DBKey::ContractCode {account_id: account_id})) {
-        Some(code) => ContractCode::new(code),
-        None => None,
+pub fn read_with_cache<'a, T: BorshDeserialize + 'a>(
+    storage: &Store,
+    col: DBCol,
+    cache: &'a mut LruCache<Vec<u8>, T>,
+    key: &[u8],
+) -> io::Result<Option<&'a T>> {
+    let key_vec = key.to_vec();
+    if cache.get(&key_vec).is_some() {
+        return Ok(Some(cache.get(&key_vec).unwrap()));
     }
+    if let Some(result) = storage.get_ser(col, key)? {
+        cache.put(key.to_vec(), result);
+        return Ok(cache.get(&key_vec));
+    }
+    Ok(None)
+}
+
+pub fn create_store(_path: &Path) -> Arc<Store> {
+    // let db = Arc::pin(RocksDB::new(path).expect("Failed to open the database"));
+    let db = Arc::pin(TestDB::new());
+    Arc::new(Store::new(db))
+}
+
+/// Reads an object from Trie.
+/// # Errors
+/// see StorageError
+pub fn get<T: BorshDeserialize>(
+    state_update: &TrieUpdate,
+    key: &TrieKey,
+) -> Result<Option<T>, StorageError> {
+    state_update.get(key).and_then(|opt| {
+        opt.map_or_else(
+            || Ok(None),
+            |data| {
+                T::try_from_slice(&data)
+                    .map_err(|_| {
+                        StorageError::StorageInconsistentState("Failed to deserialize".to_string())
+                    })
+                    .map(Some)
+            },
+        )
+    })
+}
+
+/// Writes an object into Trie.
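+/// The value is Borsh-serialized and stored under `key`; reading it back goes
+/// through `get` above. A round-trip sketch (hypothetical `state_update`,
+/// `account_id` and `account`):
+///
+/// ```ignore
+/// set(&mut state_update, TrieKey::Account { account_id: account_id.clone() }, &account);
+/// let restored: Option<Account> = get(&state_update, &TrieKey::Account { account_id })?;
+/// assert_eq!(restored, Some(account));
+/// ```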
+pub fn set<T: BorshSerialize>(state_update: &mut TrieUpdate, key: TrieKey, value: &T) {
+    let data = value.try_to_vec().expect("Borsh serializer is not expected to ever fail");
+    state_update.set(key, data);
+}
+
+pub fn set_account(state_update: &mut TrieUpdate, account_id: AccountId, account: &Account) {
+    set(state_update, TrieKey::Account { account_id }, account)
+}
+
+pub fn get_account(
+    state_update: &TrieUpdate,
+    account_id: &AccountId,
+) -> Result<Option<Account>, StorageError> {
+    get(state_update, &TrieKey::Account { account_id: account_id.clone() })
 }
 
-// code
-pub fn try_set_code(store: &mut Store, account_id: AccountId, code: &ContractCode) -> bool {
-    store.try_set(&DBKey::ContractCode {account_id: account_id}, code.code)
+pub fn set_received_data(
+    state_update: &mut TrieUpdate,
+    receiver_id: AccountId,
+    data_id: CryptoHash,
+    data: &ReceivedData,
+) {
+    set(state_update, TrieKey::ReceivedData { receiver_id, data_id }, data);
+}
 
-pub fn force_set_code(store: &mut Store, account_id: AccountId, code: &ContractCode) -> bool {
-    store.force_set(&DBKey::ContractCode {account_id: account_id}, code.code)
+pub fn get_received_data(
+    state_update: &TrieUpdate,
+    receiver_id: &AccountId,
+    data_id: CryptoHash,
+) -> Result<Option<ReceivedData>, StorageError> {
+    get(state_update, &TrieKey::ReceivedData { receiver_id: receiver_id.clone(), data_id })
 }
 
-pub fn get_code(store: &mut Store, account_id: AccountId) -> bool {
-    store.remove(&DBKey::ContractCode {account_id: account_id})
+pub fn set_postponed_receipt(state_update: &mut TrieUpdate, receipt: &Receipt) {
+    let key = TrieKey::PostponedReceipt {
+        receiver_id: receipt.receiver_id.clone(),
+        receipt_id: receipt.receipt_id,
+    };
+    set(state_update, key, receipt);
 }
 
-// account
-pub fn try_set_code(store: &mut Store, account_id: AccountId, code: &ContractCode) -> bool {
-    store.try_set(&DBKey::ContractCode {account_id: account_id}, code.code)
+pub fn remove_postponed_receipt(
+    state_update: &mut TrieUpdate,
+    receiver_id: &AccountId,
+    receipt_id: CryptoHash,
+) {
+    state_update.remove(TrieKey::PostponedReceipt { receiver_id: receiver_id.clone(), receipt_id });
 }
 
-pub fn force_set_code(store: &mut Store, account_id: AccountId, code: &ContractCode) -> bool {
-    store.force_set(&DBKey::ContractCode {account_id: account_id}, code.code)
+pub fn get_postponed_receipt(
+    state_update: &TrieUpdate,
+    receiver_id: &AccountId,
+    receipt_id: CryptoHash,
+) -> Result<Option<Receipt>, StorageError> {
+    get(state_update, &TrieKey::PostponedReceipt { receiver_id: receiver_id.clone(), receipt_id })
 }
 
-pub fn get_code(store: &mut Store, account_id: AccountId) -> bool {
-    store.remove(&DBKey::ContractCode {account_id: account_id})
+pub fn get_delayed_receipt_indices(
+    state_update: &TrieUpdate,
+) -> Result<DelayedReceiptIndices, StorageError> {
+    Ok(get(state_update, &TrieKey::DelayedReceiptIndices)?.unwrap_or_default())
+}
+
+pub fn set_access_key(
+    state_update: &mut TrieUpdate,
+    account_id: AccountId,
+    public_key: PublicKey,
+    access_key: &AccessKey,
+) {
+    set(state_update, TrieKey::AccessKey { account_id, public_key }, access_key);
+}
+
+pub fn remove_access_key(
+    state_update: &mut TrieUpdate,
+    account_id: AccountId,
+    public_key: PublicKey,
+) {
+    state_update.remove(TrieKey::AccessKey { account_id, public_key });
+}
+
+pub fn get_access_key(
+    state_update: &TrieUpdate,
+    account_id: &AccountId,
+    public_key: &PublicKey,
+) -> Result<Option<AccessKey>, StorageError> {
+    get(
+        state_update,
+        &TrieKey::AccessKey { account_id: account_id.clone(), public_key: public_key.clone() },
+    )
+}
+
+pub fn get_access_key_raw(
+    state_update: &TrieUpdate,
+    raw_key: &[u8],
+) -> Result<Option<AccessKey>, StorageError> {
+    get(
+        state_update,
+        &trie_key_parsers::parse_trie_key_access_key_from_raw_key(raw_key)
+            .expect("access key in the state should be correct"),
+    )
+}
+
+pub fn set_code(state_update: &mut TrieUpdate, account_id: AccountId, code: &ContractCode) {
+    state_update.set(TrieKey::ContractCode { account_id }, code.code().to_vec());
+}
+
+pub fn get_code(
+    state_update: &TrieUpdate,
+    account_id: &AccountId,
+    code_hash: Option<CryptoHash>,
+) -> Result<Option<ContractCode>, StorageError> {
+    state_update
+        .get(&TrieKey::ContractCode { account_id: account_id.clone() })
+        .map(|opt| opt.map(|code| ContractCode::new(code, code_hash)))
+}
+
+/// Removes the account, its code and all access keys associated with it.
+pub fn remove_account(
+    state_update: &mut TrieUpdate,
+    account_id: &AccountId,
+) -> Result<(), StorageError> {
+    state_update.remove(TrieKey::Account { account_id: account_id.clone() });
+    state_update.remove(TrieKey::ContractCode { account_id: account_id.clone() });
+
+    // Removing access keys
+    let public_keys = state_update
+        .iter(&trie_key_parsers::get_raw_prefix_for_access_keys(account_id))?
+        .map(|raw_key| {
+            trie_key_parsers::parse_public_key_from_access_key_key(&raw_key?, account_id).map_err(
+                |_e| {
+                    StorageError::StorageInconsistentState(
+                        "Can't parse public key from raw key for AccessKey".to_string(),
+                    )
+                },
+            )
+        })
+        .collect::<Result<Vec<_>, _>>()?;
+    for public_key in public_keys {
+        state_update.remove(TrieKey::AccessKey { account_id: account_id.clone(), public_key });
+    }
+
+    // Removing contract data
+    let data_keys = state_update
+        .iter(&trie_key_parsers::get_raw_prefix_for_contract_data(account_id, &[]))?
+        .map(|raw_key| {
+            trie_key_parsers::parse_data_key_from_contract_data_key(&raw_key?, account_id)
+                .map_err(|_e| {
+                    StorageError::StorageInconsistentState(
+                        "Can't parse data key from raw key for ContractData".to_string(),
+                    )
+                })
+                .map(Vec::from)
+        })
+        .collect::<Result<Vec<_>, _>>()?;
+    for key in data_keys {
+        state_update.remove(TrieKey::ContractData { account_id: account_id.clone(), key });
+    }
+    Ok(())
+}
+
+pub fn get_genesis_state_roots(store: &Store) -> Result<Option<Vec<StateRoot>>, std::io::Error> {
+    store.get_ser::<Vec<StateRoot>>(DBCol::ColBlockMisc, GENESIS_STATE_ROOTS_KEY)
+}
+
+pub fn get_genesis_hash(store: &Store) -> Result<Option<CryptoHash>, std::io::Error> {
+    store.get_ser::<CryptoHash>(DBCol::ColBlockMisc, GENESIS_JSON_HASH_KEY)
+}
+
+pub fn set_genesis_hash(store_update: &mut StoreUpdate, genesis_hash: &CryptoHash) {
+    store_update
+        .set_ser::<CryptoHash>(DBCol::ColBlockMisc, GENESIS_JSON_HASH_KEY, genesis_hash)
+        .expect("Borsh cannot fail");
+}
+
+pub fn set_genesis_state_roots(store_update: &mut StoreUpdate, genesis_roots: &Vec<StateRoot>) {
+    store_update
+        .set_ser::<Vec<StateRoot>>(DBCol::ColBlockMisc, GENESIS_STATE_ROOTS_KEY, genesis_roots)
+        .expect("Borsh cannot fail");
+}
+
+pub struct StoreCompiledContractCache {
+    pub store: Arc<Store>,
+}
+
+/// Cache for compiled contracts code using Store for keeping data.
+/// We store contracts in VM-specific format in DBCol::ColCachedContractCode.
+/// Key must take into account VM being used and its configuration, so that
+/// we don't cache non-gas metered binaries, for example.
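+/// A sketch of the intended use (hypothetical `store`, `key` and
+/// `compiled_code`; the exact key derivation scheme is the caller's
+/// responsibility):
+///
+/// ```ignore
+/// let cache = StoreCompiledContractCache { store: store.clone() };
+/// cache.put(key.as_ref(), &compiled_code)?;
+/// assert_eq!(cache.get(key.as_ref())?, Some(compiled_code));
+/// ```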
+impl CompiledContractCache for StoreCompiledContractCache {
+    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), std::io::Error> {
+        let mut store_update = self.store.store_update();
+        store_update.set(DBCol::ColCachedContractCode, key, value);
+        store_update.commit()
+    }
+
+    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, std::io::Error> {
+        self.store.get(DBCol::ColCachedContractCode, key)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn test_no_cache_disabled() {
+        #[cfg(feature = "no_cache")]
+        panic!("no cache is enabled");
+    }
+}
diff --git a/mock-enclave/src/skw-vm-store/src/migrations.rs b/mock-enclave/src/skw-vm-store/src/migrations.rs
new file mode 100644
index 0000000..021bcc7
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/migrations.rs
@@ -0,0 +1,840 @@
+use std::collections::hash_map::Entry;
+use std::collections::{HashMap, HashSet};
+use std::rc::Rc;
+use std::sync::Arc;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+
+use near_crypto::KeyType;
+use near_primitives::block::{Block, Tip};
+use near_primitives::block_header::BlockHeader;
+use near_primitives::epoch_manager::epoch_info::{EpochInfo, EpochInfoV1};
+use near_primitives::hash::{hash, CryptoHash};
+use near_primitives::merkle::{merklize, PartialMerkleTree};
+use near_primitives::receipt::{DelayedReceiptIndices, Receipt, ReceiptEnum};
+use near_primitives::shard_layout::ShardUId;
+use near_primitives::sharding::{
+    EncodedShardChunk, EncodedShardChunkV1, PartialEncodedChunk, PartialEncodedChunkV1,
+    ReceiptList, ReceiptProof, ReedSolomonWrapper, ShardChunk, ShardChunkV1, ShardProof,
+};
+use near_primitives::syncing::{ShardStateSyncResponseHeader, ShardStateSyncResponseHeaderV1};
+use near_primitives::transaction::ExecutionOutcomeWithIdAndProof;
+use near_primitives::trie_key::TrieKey;
+use near_primitives::types::validator_stake::ValidatorStake;
+use near_primitives::types::{AccountId, Balance};
+use near_primitives::utils::{
+    create_receipt_id_from_transaction, get_block_shard_id, index_to_bytes,
+};
+use near_primitives::validator_signer::InMemoryValidatorSigner;
+use near_primitives::version::DbVersion;
+
+use crate::db::DBCol::{
+    ColBlockHeader, ColBlockHeight, ColBlockMerkleTree, ColBlockMisc, ColBlockOrdinal, ColChunks,
+    ColPartialChunks, ColStateParts,
+};
+use crate::db::{DBCol, RocksDB, GENESIS_JSON_HASH_KEY, VERSION_KEY};
+use crate::migrations::v6_to_v7::{
+    col_state_refcount_8byte, migrate_col_transaction_refcount, migrate_receipts_refcount,
+};
+use crate::migrations::v8_to_v9::{
+    recompute_col_rc, repair_col_receipt_id_to_shard_id, repair_col_transactions,
+};
+use crate::trie::{TrieCache, TrieCachingStorage};
+use crate::{create_store, Store, StoreUpdate, Trie, TrieUpdate, FINAL_HEAD_KEY, HEAD_KEY};
+use std::path::Path;
+
+pub mod v6_to_v7;
+pub mod v8_to_v9;
+
+pub fn get_store_version(path: &Path) -> DbVersion {
+    RocksDB::get_version(path).expect("Failed to open the database")
+}
+
+fn set_store_version_inner(store_update: &mut StoreUpdate, db_version: u32) {
+    store_update.set(
+        DBCol::ColDbVersion,
+        VERSION_KEY,
+        &serde_json::to_vec(&db_version).expect("Failed to serialize version"),
+    );
+}
+
+pub fn set_store_version(store: &Store, db_version: u32) {
+    let mut store_update = store.store_update();
+    set_store_version_inner(&mut store_update, db_version);
+    store_update.commit().expect("Failed to write version to database");
+}
+
+fn get_outcomes_by_block_hash(store: &Store, block_hash: &CryptoHash) -> HashSet<CryptoHash> {
+    match store.get_ser(DBCol::ColOutcomeIds, block_hash.as_ref()) {
+        Ok(Some(hash_set)) => hash_set,
+        Ok(None) => HashSet::new(),
+        Err(e) => panic!("Can't read DB, {:?}", e),
+    }
+}
+
+pub fn fill_col_outcomes_by_hash(store: &Store) {
+    let mut store_update = store.store_update();
+    let outcomes: Vec<ExecutionOutcomeWithIdAndProof> = store
+        .iter(DBCol::ColTransactionResult)
+        .map(|key| {
+            ExecutionOutcomeWithIdAndProof::try_from_slice(&key.1)
+                .expect("BorshDeserialize should not fail")
+        })
+        .collect();
+    let mut block_hash_to_outcomes: HashMap<CryptoHash, HashSet<CryptoHash>> = HashMap::new();
+    for outcome in outcomes {
+        match block_hash_to_outcomes.entry(outcome.block_hash) {
+            Entry::Occupied(mut entry) => {
+                entry.get_mut().insert(*outcome.id());
+            }
+            Entry::Vacant(entry) => {
+                let mut hash_set = get_outcomes_by_block_hash(store, &outcome.block_hash);
+                hash_set.insert(*outcome.id());
+                entry.insert(hash_set);
+            }
+        };
+    }
+    for (block_hash, hash_set) in block_hash_to_outcomes {
+        store_update
+            .set_ser(DBCol::ColOutcomeIds, block_hash.as_ref(), &hash_set)
+            .expect("BorshSerialize should not fail");
+    }
+    store_update.commit().expect("Failed to migrate");
+}
+
+pub fn fill_col_transaction_refcount(store: &Store) {
+    let mut store_update = store.store_update();
+    let chunks: Vec<ShardChunkV1> = store
+        .iter(DBCol::ColChunks)
+        .map(|key| ShardChunkV1::try_from_slice(&key.1).expect("BorshDeserialize should not fail"))
+        .collect();
+
+    let mut tx_refcount: HashMap<CryptoHash, u64> = HashMap::new();
+    for chunk in chunks {
+        for tx in chunk.transactions {
+            tx_refcount.entry(tx.get_hash()).and_modify(|x| *x += 1).or_insert(1);
+        }
+    }
+    for (tx_hash, refcount) in tx_refcount {
+        store_update
+            .set_ser(DBCol::_ColTransactionRefCount, tx_hash.as_ref(), &refcount)
+            .expect("BorshSerialize should not fail");
+    }
+    store_update.commit().expect("Failed to migrate");
+}
+
+fn recompute_block_ordinal(store: &Store) {
+    let mut store_update = BatchedStoreUpdate::new(store, 10_000_000);
+    for (_, value) in store.iter(ColBlockHeight) {
+        let block_merkle_tree =
+            store.get_ser::<PartialMerkleTree>(ColBlockMerkleTree, &value).unwrap().unwrap();
+        let block_hash = CryptoHash::try_from_slice(&value).unwrap();
+        store_update
+            .set_ser(ColBlockOrdinal, &index_to_bytes(block_merkle_tree.size()), &block_hash)
+            .unwrap();
+    }
+    store_update.finish().unwrap();
+}
+
+pub fn migrate_6_to_7(path: &Path) {
+    let db = Arc::pin(RocksDB::new_v6(path).expect("Failed to open the database"));
+    let store = Store::new(db);
+    let mut store_update = store.store_update();
+    col_state_refcount_8byte(&store, &mut store_update);
+    migrate_col_transaction_refcount(&store, &mut store_update);
+    migrate_receipts_refcount(&store, &mut store_update);
+    set_store_version_inner(&mut store_update, 7);
+    store_update.commit().expect("Failed to migrate")
+}
+
+pub fn migrate_7_to_8(path: &Path) {
+    let store = create_store(path);
+    let mut store_update = store.store_update();
+    for (key, _) in store.iter_without_rc_logic(ColStateParts) {
+        store_update.delete(ColStateParts, &key);
+    }
+    set_store_version_inner(&mut store_update, 8);
+    store_update.commit().expect("Failed to migrate from DB version 7 to DB version 8");
+}
+
+// No format change. Recompute ColTransactions and ColReceiptIdToShardId because they could be inconsistent.
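+// A caller is expected to step the schema one version at a time, roughly like
+// this (sketch; `LATEST_DB_VERSION` is a hypothetical constant):
+//
+//     while get_store_version(path) < LATEST_DB_VERSION {
+//         match get_store_version(path) {
+//             6 => migrate_6_to_7(path),
+//             7 => migrate_7_to_8(path),
+//             8 => migrate_8_to_9(path),
+//             _ => unimplemented!(),
+//         }
+//     }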
+pub fn migrate_8_to_9(path: &Path) { + let store = create_store(path); + repair_col_transactions(&store); + repair_col_receipt_id_to_shard_id(&store); + set_store_version(&store, 9); +} + +pub fn migrate_9_to_10(path: &Path, is_archival: bool) { + let store = create_store(path); + let protocol_version = 38; // protocol_version at the time this migration was written + if is_archival { + // Hard code the number of parts there. These numbers are only used for this migration. + let num_total_parts = 100; + let num_data_parts = (num_total_parts - 1) / 3; + let num_parity_parts = num_total_parts - num_data_parts; + let mut rs = ReedSolomonWrapper::new(num_data_parts, num_parity_parts); + let signer = + InMemoryValidatorSigner::from_seed("test".parse().unwrap(), KeyType::ED25519, "test"); + let mut store_update = store.store_update(); + let batch_size_limit = 10_000_000; + let mut batch_size = 0; + for (key, value) in store.iter_without_rc_logic(ColChunks) { + if let Ok(Some(partial_chunk)) = + store.get_ser::(ColPartialChunks, &key) + { + if partial_chunk.parts.len() == num_total_parts { + continue; + } + } + batch_size += key.len() + value.len() + 8; + let chunk: ShardChunkV1 = BorshDeserialize::try_from_slice(&value) + .expect("Borsh deserialization should not fail"); + let ShardChunkV1 { chunk_hash, header, transactions, receipts } = chunk; + let proposals = header + .inner + .validator_proposals + .iter() + .map(|v| ValidatorStake::V1(v.clone())) + .collect(); + let (encoded_chunk, merkle_paths) = EncodedShardChunk::new( + header.inner.prev_block_hash, + header.inner.prev_state_root, + header.inner.outcome_root, + header.inner.height_created, + header.inner.shard_id, + &mut rs, + header.inner.gas_used, + header.inner.gas_limit, + header.inner.balance_burnt, + header.inner.tx_root, + proposals, + transactions, + &receipts, + header.inner.outgoing_receipts_root, + &signer, + protocol_version, + ) + .expect("create encoded chunk should not fail"); + let mut encoded_chunk = match encoded_chunk { + EncodedShardChunk::V1(chunk) => chunk, + EncodedShardChunk::V2(_) => panic!("Should not have created EncodedShardChunkV2"), + }; + encoded_chunk.header = header; + let outgoing_receipt_hashes = + vec![hash(&ReceiptList(0, &receipts).try_to_vec().unwrap())]; + let (_, outgoing_receipt_proof) = merklize(&outgoing_receipt_hashes); + + let partial_encoded_chunk = EncodedShardChunk::V1(encoded_chunk) + .create_partial_encoded_chunk( + (0..num_total_parts as u64).collect(), + vec![ReceiptProof( + receipts, + ShardProof { + from_shard_id: 0, + to_shard_id: 0, + proof: outgoing_receipt_proof[0].clone(), + }, + )], + &merkle_paths, + ); + let partial_encoded_chunk = match partial_encoded_chunk { + PartialEncodedChunk::V1(chunk) => chunk, + PartialEncodedChunk::V2(_) => { + panic!("Should not have created PartialEncodedChunkV2") + } + }; + store_update + .set_ser(ColPartialChunks, chunk_hash.as_ref(), &partial_encoded_chunk) + .expect("storage update should not fail"); + if batch_size > batch_size_limit { + store_update.commit().expect("storage update should not fail"); + store_update = store.store_update(); + batch_size = 0; + } + } + store_update.commit().expect("storage update should not fail"); + } + set_store_version(&store, 10); +} + +pub fn migrate_10_to_11(path: &Path) { + let store = create_store(path); + let mut store_update = store.store_update(); + let head = store.get_ser::(ColBlockMisc, HEAD_KEY).unwrap().expect("head must exist"); + let block_header = store + .get_ser::(ColBlockHeader, 
head.last_block_hash.as_ref()) + .unwrap() + .expect("head header must exist"); + let last_final_block_hash = if block_header.last_final_block() == &CryptoHash::default() { + let mut cur_header = block_header; + while cur_header.prev_hash() != &CryptoHash::default() { + cur_header = store + .get_ser::(ColBlockHeader, cur_header.prev_hash().as_ref()) + .unwrap() + .unwrap() + } + *cur_header.hash() + } else { + *block_header.last_final_block() + }; + let last_final_header = store + .get_ser::(ColBlockHeader, last_final_block_hash.as_ref()) + .unwrap() + .expect("last final block header must exist"); + let final_head = Tip::from_header(&last_final_header); + store_update.set_ser(ColBlockMisc, FINAL_HEAD_KEY, &final_head).unwrap(); + store_update.commit().unwrap(); + set_store_version(&store, 11); +} + +pub fn migrate_11_to_12(path: &Path) { + let store = create_store(path); + recompute_col_rc( + &store, + DBCol::ColReceipts, + store + .iter(DBCol::ColChunks) + .map(|(_key, value)| { + ShardChunkV1::try_from_slice(&value).expect("BorshDeserialize should not fail") + }) + .flat_map(|chunk: ShardChunkV1| chunk.receipts) + .map(|rx| (rx.receipt_id, rx.try_to_vec().unwrap())), + ); + set_store_version(&store, 12); +} + +pub struct BatchedStoreUpdate<'a> { + batch_size_limit: usize, + batch_size: usize, + store: &'a Store, + store_update: Option, +} + +impl<'a> BatchedStoreUpdate<'a> { + pub fn new(store: &'a Store, batch_size_limit: usize) -> Self { + Self { batch_size_limit, batch_size: 0, store, store_update: Some(store.store_update()) } + } + + fn commit(&mut self) -> Result<(), std::io::Error> { + let store_update = self.store_update.take().unwrap(); + store_update.commit()?; + self.store_update = Some(self.store.store_update()); + self.batch_size = 0; + Ok(()) + } + + pub fn set_ser( + &mut self, + col: DBCol, + key: &[u8], + value: &T, + ) -> Result<(), std::io::Error> { + let value_bytes = value.try_to_vec()?; + self.batch_size += key.as_ref().len() + value_bytes.len() + 8; + self.store_update.as_mut().unwrap().set(col, key.as_ref(), &value_bytes); + + if self.batch_size > self.batch_size_limit { + self.commit()?; + } + + Ok(()) + } + + pub fn finish(mut self) -> Result<(), std::io::Error> { + if self.batch_size > 0 { + self.commit()?; + } + + Ok(()) + } +} + +fn map_col(store: &Store, col: DBCol, f: F) -> Result<(), std::io::Error> +where + T: BorshDeserialize, + U: BorshSerialize, + F: Fn(T) -> U, +{ + let keys: Vec<_> = store.iter(col).map(|(key, _)| key).collect(); + let mut store_update = BatchedStoreUpdate::new(store, 10_000_000); + + for key in keys { + let value: T = store.get_ser(col, key.as_ref())?.unwrap(); + let new_value = f(value); + store_update.set_ser(col, key.as_ref(), &new_value)?; + } + + store_update.finish()?; + + Ok(()) +} + +#[allow(unused)] +fn map_col_from_key(store: &Store, col: DBCol, f: F) -> Result<(), std::io::Error> +where + U: BorshSerialize, + F: Fn(&[u8]) -> U, +{ + let mut store_update = store.store_update(); + let batch_size_limit = 10_000_000; + let mut batch_size = 0; + for (key, _) in store.iter(col) { + let new_value = f(&key); + let new_bytes = new_value.try_to_vec()?; + batch_size += key.as_ref().len() + new_bytes.len() + 8; + store_update.set(col, key.as_ref(), &new_bytes); + + if batch_size > batch_size_limit { + store_update.commit()?; + store_update = store.store_update(); + batch_size = 0; + } + } + + if batch_size > 0 { + store_update.commit()?; + } + + Ok(()) +} + +/// Lift all chunks to the versioned structure +pub fn 
migrate_13_to_14(path: &Path) { + let store = create_store(path); + + map_col(&store, DBCol::ColPartialChunks, |pec: PartialEncodedChunkV1| { + PartialEncodedChunk::V1(pec) + }) + .unwrap(); + map_col(&store, DBCol::ColInvalidChunks, |chunk: EncodedShardChunkV1| { + EncodedShardChunk::V1(chunk) + }) + .unwrap(); + map_col(&store, DBCol::ColChunks, ShardChunk::V1).unwrap(); + map_col(&store, DBCol::ColStateHeaders, |header: ShardStateSyncResponseHeaderV1| { + ShardStateSyncResponseHeader::V1(header) + }) + .unwrap(); + + set_store_version(&store, 14); +} + +/// Make execution outcome ids in `ColOutcomeIds` ordered by replaying the chunks. +pub fn migrate_14_to_15(path: &Path) { + let store = create_store(path); + let trie_store = Box::new(TrieCachingStorage::new( + store.clone(), + TrieCache::new(), + ShardUId::single_shard(), + )); + let trie = Rc::new(Trie::new(trie_store, ShardUId::single_shard())); + + let mut store_update = store.store_update(); + let batch_size_limit = 10_000_000; + let mut batch_size = 0; + + for (key, value) in store.iter_without_rc_logic(DBCol::ColOutcomeIds) { + let block_hash = CryptoHash::try_from_slice(&key).unwrap(); + let block = + store.get_ser::(DBCol::ColBlock, &key).unwrap().expect("block should exist"); + + for chunk_header in + block.chunks().iter().filter(|h| h.height_included() == block.header().height()) + { + let execution_outcome_ids = >::try_from_slice(&value).unwrap(); + + let chunk = store + .get_ser::(DBCol::ColChunks, chunk_header.chunk_hash().as_ref()) + .unwrap() + .expect("chunk should exist"); + + let epoch_info = store + .get_ser::(DBCol::ColEpochInfo, block.header().epoch_id().as_ref()) + .unwrap() + .expect("epoch id should exist"); + let protocol_version = epoch_info.protocol_version; + + let mut new_execution_outcome_ids = vec![]; + let mut local_receipt_ids = vec![]; + let mut local_receipt_congestion = false; + + // Step 0: execution outcomes of transactions + for transaction in chunk.transactions() { + let tx_hash = transaction.get_hash(); + // Transactions must all be executed since when chunk is produced, there is a gas + // limit check. 
+ assert!( + execution_outcome_ids.contains(&tx_hash), + "transaction hash {} does not exist in block {}", + tx_hash, + block_hash + ); + new_execution_outcome_ids.push(tx_hash); + if transaction.transaction.signer_id == transaction.transaction.receiver_id { + let local_receipt_id = create_receipt_id_from_transaction( + protocol_version, + transaction, + block.header().prev_hash(), + block.header().hash(), + ); + if execution_outcome_ids.contains(&local_receipt_id) { + local_receipt_ids.push(local_receipt_id); + } else { + local_receipt_congestion = true; + } + } + } + + // Step 1: local receipts + new_execution_outcome_ids.extend(local_receipt_ids); + + let mut state_update = TrieUpdate::new(trie.clone(), chunk.prev_state_root()); + + let mut process_receipt = + |receipt: &Receipt, state_update: &mut TrieUpdate| match &receipt.receipt { + ReceiptEnum::Action(_) => { + if execution_outcome_ids.contains(&receipt.receipt_id) { + new_execution_outcome_ids.push(receipt.receipt_id); + } + } + ReceiptEnum::Data(data_receipt) => { + if let Ok(Some(bytes)) = state_update.get(&TrieKey::PostponedReceiptId { + receiver_id: receipt.receiver_id.clone(), + data_id: data_receipt.data_id, + }) { + let receipt_id = CryptoHash::try_from_slice(&bytes).unwrap(); + let trie_key = TrieKey::PendingDataCount { + receiver_id: receipt.receiver_id.clone(), + receipt_id, + }; + let pending_receipt_count = + u32::try_from_slice(&state_update.get(&trie_key).unwrap().unwrap()) + .unwrap(); + if pending_receipt_count == 1 + && execution_outcome_ids.contains(&receipt_id) + { + new_execution_outcome_ids.push(receipt_id); + } + state_update + .set(trie_key, (pending_receipt_count - 1).try_to_vec().unwrap()) + } + } + }; + + // Step 2: delayed receipts + if !local_receipt_congestion { + let mut delayed_receipt_indices: DelayedReceiptIndices = state_update + .get(&TrieKey::DelayedReceiptIndices) + .map(|bytes| { + bytes + .map(|b| DelayedReceiptIndices::try_from_slice(&b).unwrap()) + .unwrap_or_default() + }) + .unwrap_or_default(); + + while delayed_receipt_indices.first_index + < delayed_receipt_indices.next_available_index + { + let receipt: Receipt = state_update + .get(&TrieKey::DelayedReceipt { + index: delayed_receipt_indices.first_index, + }) + .unwrap() + .map(|bytes| Receipt::try_from_slice(&bytes).unwrap()) + .unwrap(); + process_receipt(&receipt, &mut state_update); + delayed_receipt_indices.first_index += 1; + } + } + + // Step 3: receipts + for receipt in chunk.receipts() { + process_receipt(receipt, &mut state_update); + } + assert_eq!( + new_execution_outcome_ids.len(), + execution_outcome_ids.len(), + "inconsistent number of outcomes detected while migrating block {}: {:?} vs. 
{:?}", + block_hash, + new_execution_outcome_ids, + execution_outcome_ids + ); + let value = new_execution_outcome_ids.try_to_vec().unwrap(); + store_update.set( + DBCol::ColOutcomeIds, + &get_block_shard_id(&block_hash, chunk_header.shard_id()), + &value, + ); + store_update.delete(DBCol::ColOutcomeIds, &key); + batch_size += key.len() + value.len() + 40; + if batch_size > batch_size_limit { + store_update.commit().unwrap(); + store_update = store.store_update(); + batch_size = 0; + } + } + } + store_update.commit().unwrap(); + set_store_version(&store, 15); +} + +pub fn migrate_17_to_18(path: &Path) { + use near_primitives::challenge::SlashedValidator; + use near_primitives::types::validator_stake::ValidatorStakeV1; + use near_primitives::types::{BlockHeight, EpochId}; + use near_primitives::version::ProtocolVersion; + + // Migrate from OldBlockInfo to NewBlockInfo - add hash + #[derive(BorshDeserialize)] + struct OldBlockInfo { + pub height: BlockHeight, + pub last_finalized_height: BlockHeight, + pub last_final_block_hash: CryptoHash, + pub prev_hash: CryptoHash, + pub epoch_first_block: CryptoHash, + pub epoch_id: EpochId, + pub proposals: Vec, + pub validator_mask: Vec, + pub latest_protocol_version: ProtocolVersion, + pub slashed: Vec, + pub total_supply: Balance, + } + #[derive(BorshSerialize)] + struct NewBlockInfo { + pub hash: CryptoHash, + pub height: BlockHeight, + pub last_finalized_height: BlockHeight, + pub last_final_block_hash: CryptoHash, + pub prev_hash: CryptoHash, + pub epoch_first_block: CryptoHash, + pub epoch_id: EpochId, + pub proposals: Vec, + pub validator_mask: Vec, + pub latest_protocol_version: ProtocolVersion, + pub slashed: Vec, + pub total_supply: Balance, + } + let store = create_store(path); + map_col_from_key(&store, DBCol::ColBlockInfo, |key| { + let hash = CryptoHash::try_from(key).unwrap(); + let old_block_info = + store.get_ser::(DBCol::ColBlockInfo, key).unwrap().unwrap(); + NewBlockInfo { + hash, + height: old_block_info.height, + last_finalized_height: old_block_info.last_finalized_height, + last_final_block_hash: old_block_info.last_final_block_hash, + prev_hash: old_block_info.prev_hash, + epoch_first_block: old_block_info.epoch_first_block, + epoch_id: old_block_info.epoch_id, + proposals: old_block_info.proposals, + validator_mask: old_block_info.validator_mask, + latest_protocol_version: old_block_info.latest_protocol_version, + slashed: old_block_info.slashed, + total_supply: old_block_info.total_supply, + } + }) + .unwrap(); + + // Add ColHeaderHashesByHeight lazily + // + // KPR: traversing thru ColBlockHeader at Mainnet (20 mln Headers) + // takes ~13 minutes on my laptop. + // It's annoying to wait until migration finishes + // as real impact is not too big as we don't GC Headers now. + // I expect that after 5 Epochs ColHeaderHashesByHeight will be filled + // properly and we never return to this migration again. 
+ + set_store_version(&store, 18); +} + +pub fn migrate_20_to_21(path: &Path) { + let store = create_store(path); + let mut store_update = store.store_update(); + store_update.delete(DBCol::ColBlockMisc, GENESIS_JSON_HASH_KEY); + store_update.commit().unwrap(); + + set_store_version(&store, 21); +} + +pub fn migrate_21_to_22(path: &Path) { + use near_primitives::epoch_manager::BlockInfoV1; + use near_primitives::epoch_manager::SlashState; + use near_primitives::types::validator_stake::ValidatorStakeV1; + use near_primitives::types::{BlockHeight, EpochId}; + use near_primitives::version::ProtocolVersion; + #[derive(BorshDeserialize)] + struct OldBlockInfo { + pub hash: CryptoHash, + pub height: BlockHeight, + pub last_finalized_height: BlockHeight, + pub last_final_block_hash: CryptoHash, + pub prev_hash: CryptoHash, + pub epoch_first_block: CryptoHash, + pub epoch_id: EpochId, + pub proposals: Vec, + pub chunk_mask: Vec, + pub latest_protocol_version: ProtocolVersion, + pub slashed: HashMap, + pub total_supply: Balance, + } + let store = create_store(path); + map_col_from_key(&store, DBCol::ColBlockInfo, |key| { + let old_block_info = + store.get_ser::(DBCol::ColBlockInfo, key).unwrap().unwrap(); + if key == &[0; 32] { + // dummy value + return BlockInfoV1 { + hash: old_block_info.hash, + height: old_block_info.height, + last_finalized_height: old_block_info.last_finalized_height, + last_final_block_hash: old_block_info.last_final_block_hash, + prev_hash: old_block_info.prev_hash, + epoch_first_block: old_block_info.epoch_first_block, + epoch_id: old_block_info.epoch_id, + proposals: old_block_info.proposals, + chunk_mask: old_block_info.chunk_mask, + latest_protocol_version: old_block_info.latest_protocol_version, + slashed: old_block_info.slashed, + total_supply: old_block_info.total_supply, + timestamp_nanosec: 0, + }; + } + let block_header = + store.get_ser::(DBCol::ColBlockHeader, key).unwrap().unwrap(); + BlockInfoV1 { + hash: old_block_info.hash, + height: old_block_info.height, + last_finalized_height: old_block_info.last_finalized_height, + last_final_block_hash: old_block_info.last_final_block_hash, + prev_hash: old_block_info.prev_hash, + epoch_first_block: old_block_info.epoch_first_block, + epoch_id: old_block_info.epoch_id, + proposals: old_block_info.proposals, + chunk_mask: old_block_info.chunk_mask, + latest_protocol_version: old_block_info.latest_protocol_version, + slashed: old_block_info.slashed, + total_supply: old_block_info.total_supply, + timestamp_nanosec: block_header.raw_timestamp(), + } + }) + .unwrap(); + set_store_version(&store, 22); +} + +pub fn migrate_25_to_26(path: &Path) { + let store = create_store(path); + let mut store_update = store.store_update(); + store_update.delete_all(DBCol::ColCachedContractCode); + store_update.commit().unwrap(); + + set_store_version(&store, 26); +} + +pub fn migrate_26_to_27(path: &Path, is_archival: bool) { + let store = create_store(path); + if is_archival { + recompute_block_ordinal(store.as_ref()); + } + set_store_version(&store, 27); +} + +pub fn migrate_28_to_29(path: &Path) { + let store = create_store(path); + let mut store_update = store.store_update(); + store_update.delete_all(DBCol::_ColNextBlockWithNewChunk); + store_update.delete_all(DBCol::_ColLastBlockWithNewChunk); + store_update.commit().unwrap(); + + set_store_version(&store, 29); +} + +pub fn migrate_29_to_30(path: &Path) { + use near_primitives::epoch_manager::block_info::BlockInfo; + use near_primitives::epoch_manager::epoch_info::EpochSummary; + use 
near_primitives::epoch_manager::AGGREGATOR_KEY; + use near_primitives::types::chunk_extra::ChunkExtra; + use near_primitives::types::validator_stake::ValidatorStakeV1; + use near_primitives::types::{ + BlockChunkValidatorStats, EpochId, ProtocolVersion, ShardId, ValidatorId, + ValidatorKickoutReason, ValidatorStats, + }; + use std::collections::BTreeMap; + + let store = create_store(path); + + #[derive(BorshDeserialize)] + pub struct OldEpochSummary { + pub prev_epoch_last_block_hash: CryptoHash, + pub all_proposals: Vec, + pub validator_kickout: HashMap, + pub validator_block_chunk_stats: HashMap, + pub next_version: ProtocolVersion, + } + + #[derive(BorshDeserialize)] + pub struct OldEpochInfoAggregator { + pub block_tracker: HashMap, + pub shard_tracker: HashMap>, + pub version_tracker: HashMap, + pub all_proposals: BTreeMap, + pub epoch_id: EpochId, + pub last_block_hash: CryptoHash, + } + #[derive(BorshSerialize)] + pub struct NewEpochInfoAggregator { + pub block_tracker: HashMap, + pub shard_tracker: HashMap>, + pub version_tracker: HashMap, + pub all_proposals: BTreeMap, + pub epoch_id: EpochId, + pub last_block_hash: CryptoHash, + } + + map_col(&store, DBCol::ColChunkExtra, ChunkExtra::V1).unwrap(); + + map_col(&store, DBCol::ColBlockInfo, BlockInfo::V1).unwrap(); + + map_col(&store, DBCol::ColEpochValidatorInfo, |info: OldEpochSummary| EpochSummary { + prev_epoch_last_block_hash: info.prev_epoch_last_block_hash, + all_proposals: info.all_proposals.into_iter().map(ValidatorStake::V1).collect(), + validator_kickout: info.validator_kickout, + validator_block_chunk_stats: info.validator_block_chunk_stats, + next_version: info.next_version, + }) + .unwrap(); + + // DBCol::ColEpochInfo has a special key which contains a different type than all other + // values (EpochInfoAggregator), so we cannot use `map_col` on it. We need to handle + // the AGGREGATOR_KEY differently from all others. 
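+    // Concretely: the value stored under AGGREGATOR_KEY deserializes as
+    // OldEpochInfoAggregator, while every other key in the column holds an
+    // EpochInfoV1, so the loop below dispatches on the key before decoding.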
+ let col = DBCol::ColEpochInfo; + let keys: Vec<_> = store.iter(col).map(|(key, _)| key).collect(); + let mut store_update = BatchedStoreUpdate::new(&store, 10_000_000); + for key in keys { + if key.as_ref() == AGGREGATOR_KEY { + let value: OldEpochInfoAggregator = store.get_ser(col, key.as_ref()).unwrap().unwrap(); + let new_value = NewEpochInfoAggregator { + block_tracker: value.block_tracker, + shard_tracker: value.shard_tracker, + version_tracker: value.version_tracker, + epoch_id: value.epoch_id, + last_block_hash: value.last_block_hash, + all_proposals: value + .all_proposals + .into_iter() + .map(|(account, stake)| (account, ValidatorStake::V1(stake))) + .collect(), + }; + store_update.set_ser(col, key.as_ref(), &new_value).unwrap(); + } else { + let value: EpochInfoV1 = store.get_ser(col, key.as_ref()).unwrap().unwrap(); + let new_value = EpochInfo::V1(value); + store_update.set_ser(col, key.as_ref(), &new_value).unwrap(); + } + } + + store_update.finish().unwrap(); + + set_store_version(&store, 30); +} diff --git a/mock-enclave/src/skw-vm-store/src/migrations/v6_to_v7.rs b/mock-enclave/src/skw-vm-store/src/migrations/v6_to_v7.rs new file mode 100644 index 0000000..8813aa9 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/migrations/v6_to_v7.rs @@ -0,0 +1,126 @@ +use std::collections::HashMap; +use std::io::Cursor; + +use borsh::ser::BorshSerialize; +use byteorder::{LittleEndian, ReadBytesExt}; + +use near_primitives::block::Block; +use near_primitives::borsh::BorshDeserialize; +use near_primitives::hash::{hash, CryptoHash}; +use near_primitives::receipt::Receipt; +use near_primitives::sharding::ShardChunkV1; +use near_primitives::transaction::SignedTransaction; +use near_primitives::types::{AccountId, NumShards, ShardId}; + +use crate::db::refcount::encode_value_with_rc; +use crate::{DBCol, Store, StoreUpdate}; + +// Refcount from i32 to i64 +pub(crate) fn col_state_refcount_8byte(store: &Store, store_update: &mut StoreUpdate) { + for (k, v) in store.iter_without_rc_logic(DBCol::ColState) { + if v.len() < 4 { + store_update.delete(DBCol::ColState, &k); + continue; + } + let mut v = v.into_vec(); + v.extend_from_slice(&[0, 0, 0, 0]); + store_update.set(DBCol::ColState, &k, &v); + } +} + +// Deprecate ColTransactionRefCount, move the info to ColTransactions +pub(crate) fn migrate_col_transaction_refcount(store: &Store, store_update: &mut StoreUpdate) { + let transactions: Vec = store + .iter_without_rc_logic(DBCol::ColTransactions) + .map(|(_key, value)| { + SignedTransaction::try_from_slice(&value).expect("BorshDeserialize should not fail") + }) + .collect(); + let tx_refcount: HashMap = store + .iter(DBCol::_ColTransactionRefCount) + .map(|(key, value)| { + ( + CryptoHash::try_from_slice(&key).expect("BorshDeserialize should not fail"), + u64::try_from_slice(&value).expect("BorshDeserialize should not fail"), + ) + }) + .collect(); + + assert_eq!(transactions.len(), tx_refcount.len()); + + for tx in transactions { + let tx_hash = tx.get_hash(); + let bytes = tx.try_to_vec().expect("BorshSerialize should not fail"); + let rc = *tx_refcount.get(&tx_hash).expect("Inconsistent tx refcount data") as i64; + assert!(rc > 0); + store_update.set( + DBCol::ColTransactions, + tx_hash.as_ref(), + &encode_value_with_rc(&bytes, rc), + ); + store_update.delete(DBCol::_ColTransactionRefCount, tx_hash.as_ref()); + } +} + +pub(crate) fn get_num_shards(store: &Store) -> NumShards { + store + .iter(DBCol::ColBlock) + .map(|(_key, value)| { + 
Block::try_from_slice(value.as_ref()).expect("BorshDeserialize should not fail")
+        })
+        .map(|block| block.chunks().len() as u64)
+        .next()
+        .unwrap_or(1)
+}
+
+pub(crate) fn account_id_to_shard_id_v6(account_id: &AccountId, num_shards: NumShards) -> ShardId {
+    let mut cursor = Cursor::new(hash(account_id.as_ref().as_bytes()).0);
+    cursor.read_u64::<LittleEndian>().expect("Must not happen") % (num_shards)
+}
+
+// Make ColReceiptIdToShardId refcounted
+pub(crate) fn migrate_receipts_refcount(store: &Store, store_update: &mut StoreUpdate) {
+    let receipt_id_to_shard_id: HashMap<_, _> =
+        store.iter_without_rc_logic(DBCol::ColReceiptIdToShardId).collect();
+
+    let chunks: Vec<ShardChunkV1> = store
+        .iter(DBCol::ColChunks)
+        .map(|(_key, value)| {
+            ShardChunkV1::try_from_slice(&value).expect("BorshDeserialize should not fail")
+        })
+        .collect();
+
+    let mut receipts: HashMap<CryptoHash, (Receipt, i64)> = HashMap::new();
+    for chunk in chunks {
+        for rx in chunk.receipts {
+            receipts.entry(rx.receipt_id).and_modify(|(_receipt, rc)| *rc += 1).or_insert((rx, 1));
+        }
+    }
+
+    for (key, bytes) in receipt_id_to_shard_id {
+        let receipt_id = CryptoHash::try_from(key.as_ref()).unwrap();
+        if let Some((_receipt, rc)) = receipts.remove(&receipt_id) {
+            store_update.set(DBCol::ColReceiptIdToShardId, &key, &encode_value_with_rc(&bytes, rc));
+        } else {
+            store_update.delete(DBCol::ColReceiptIdToShardId, &key);
+        }
+    }
+
+    if !receipts.is_empty() {
+        // It's possible that some receipts are in chunks, but not in ColReceiptIdToShardId.
+        // We need to write records for them to maintain the store invariant, because gc-ing
+        // the chunks will decrement rc for receipts.
+        //
+        let num_shards = get_num_shards(store);
+        for (receipt_id, (receipt, rc)) in receipts {
+            let shard_id = account_id_to_shard_id_v6(&receipt.receiver_id, num_shards)
+                .try_to_vec()
+                .expect("BorshSerialize should not fail");
+            store_update.set(
+                DBCol::ColReceiptIdToShardId,
+                receipt_id.as_ref(),
+                &encode_value_with_rc(&shard_id, rc),
+            );
+        }
+    }
+}
diff --git a/mock-enclave/src/skw-vm-store/src/migrations/v8_to_v9.rs b/mock-enclave/src/skw-vm-store/src/migrations/v8_to_v9.rs
new file mode 100644
index 0000000..a1a0aee
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/migrations/v8_to_v9.rs
@@ -0,0 +1,80 @@
+use borsh::{BorshDeserialize, BorshSerialize};
+
+use near_primitives::hash::CryptoHash;
+use near_primitives::receipt::Receipt;
+use near_primitives::sharding::ShardChunkV1;
+
+use crate::migrations::v6_to_v7::{account_id_to_shard_id_v6, get_num_shards};
+use crate::{DBCol, Store};
+
+/// Clear all data in the column, insert keys and values from iterator.
+/// Uses multiple writes.
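+/// For example, `repair_col_transactions` below rebuilds `ColTransactions`
+/// from the transactions found in `ColChunks` (sketch; `chunks_iter` stands in
+/// for the chunk-decoding iterator):
+///
+/// ```ignore
+/// recompute_col_rc(
+///     store,
+///     DBCol::ColTransactions,
+///     chunks_iter
+///         .flat_map(|chunk| chunk.transactions)
+///         .map(|tx| (tx.get_hash(), tx.try_to_vec().unwrap())),
+/// );
+/// ```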
+pub(crate) fn recompute_col_rc<Iter>(store: &Store, column: DBCol, values: Iter)
+where
+    Iter: Iterator<Item = (CryptoHash, Vec<u8>)>,
+{
+    assert!(crate::db::IS_COL_RC[column as usize]);
+    let mut batch_size = 0;
+    let batch_size_limit = 250_000_000;
+
+    let mut store_update = store.store_update();
+    store_update.delete_all(column);
+    store_update.commit().unwrap();
+
+    let mut store_update = store.store_update();
+
+    for (key, value) in values {
+        store_update.update_refcount(column, key.as_ref(), &value, 1);
+        batch_size += key.as_ref().len() + value.len() + 8;
+        if batch_size > batch_size_limit {
+            store_update
+                .commit()
+                .unwrap_or_else(|_| panic!("Failed during recomputing column {:?}", column));
+            store_update = store.store_update();
+            batch_size = 0;
+        }
+    }
+
+    if batch_size > 0 {
+        store_update
+            .commit()
+            .unwrap_or_else(|_| panic!("Failed during recomputing column {:?}", column));
+    }
+}
+
+// Make ColTransactions match transactions in ColChunks
+pub(crate) fn repair_col_transactions(store: &Store) {
+    recompute_col_rc(
+        store,
+        DBCol::ColTransactions,
+        store
+            .iter(DBCol::ColChunks)
+            .map(|(_key, value)| {
+                ShardChunkV1::try_from_slice(&value).expect("BorshDeserialize should not fail")
+            })
+            .flat_map(|chunk: ShardChunkV1| chunk.transactions)
+            .map(|tx| (tx.get_hash(), tx.try_to_vec().unwrap())),
+    )
+}
+
+// Make ColReceiptIdToShardId match receipts in ColOutgoingReceipts
+pub(crate) fn repair_col_receipt_id_to_shard_id(store: &Store) {
+    let num_shards = get_num_shards(store);
+    recompute_col_rc(
+        store,
+        DBCol::ColReceiptIdToShardId,
+        store
+            .iter(DBCol::ColOutgoingReceipts)
+            .flat_map(|(_key, value)| {
+                <Vec<Receipt>>::try_from_slice(&value).expect("BorshDeserialize should not fail")
+            })
+            .map(|receipt| {
+                (
+                    receipt.receipt_id,
+                    account_id_to_shard_id_v6(&receipt.receiver_id, num_shards)
+                        .try_to_vec()
+                        .unwrap(),
+                )
+            }),
+    )
+}
diff --git a/mock-enclave/src/skw-vm-store/src/test_utils.rs b/mock-enclave/src/skw-vm-store/src/test_utils.rs
new file mode 100644
index 0000000..e07bfdc
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/test_utils.rs
@@ -0,0 +1,145 @@
+use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
+
+use rand::seq::SliceRandom;
+use rand::Rng;
+
+use crate::db::TestDB;
+use crate::{ShardTries, Store};
+use near_primitives::account::id::AccountId;
+use near_primitives::hash::CryptoHash;
+use near_primitives::receipt::{DataReceipt, Receipt, ReceiptEnum};
+use near_primitives::shard_layout::{ShardUId, ShardVersion};
+use near_primitives::types::NumShards;
+use std::str::from_utf8;
+
+/// Creates an in-memory database.
+pub fn create_test_store() -> Arc<Store> {
+    let db = Arc::pin(TestDB::new());
+    Arc::new(Store::new(db))
+}
+
+/// Creates a Trie using an in-memory database.
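+/// Typical test usage is to build a trie over `TestDB`, apply some changes and
+/// keep the returned root (sketch, using `test_populate_trie` below):
+///
+/// ```ignore
+/// let tries = create_tries();
+/// let root = test_populate_trie(
+///     &tries,
+///     &CryptoHash::default(),
+///     ShardUId::single_shard(),
+///     vec![(b"key".to_vec(), Some(b"value".to_vec()))],
+/// );
+/// ```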
+pub fn create_tries() -> ShardTries { + let store = create_test_store(); + ShardTries::new(store, 0, 1) +} + +pub fn create_tries_complex(shard_version: ShardVersion, num_shards: NumShards) -> ShardTries { + let store = create_test_store(); + ShardTries::new(store, shard_version, num_shards) +} + +pub fn test_populate_trie( + tries: &ShardTries, + root: &CryptoHash, + shard_uid: ShardUId, + changes: Vec<(Vec, Option>)>, +) -> CryptoHash { + let trie = tries.get_trie_for_shard(shard_uid); + assert_eq!(trie.storage.as_caching_storage().unwrap().shard_uid.shard_id, 0); + let trie_changes = trie.update(root, changes.iter().cloned()).unwrap(); + let (store_update, root) = tries.apply_all(&trie_changes, shard_uid).unwrap(); + store_update.commit().unwrap(); + let deduped = simplify_changes(&changes); + for (key, value) in deduped { + assert_eq!(trie.get(&root, &key), Ok(value)); + } + root +} + +fn gen_accounts_from_alphabet( + rng: &mut impl Rng, + max_size: usize, + alphabet: &[u8], +) -> Vec { + let size = rng.gen_range(0, max_size) + 1; + + std::iter::repeat_with(|| gen_account(rng, alphabet)).take(size).collect() +} + +pub fn gen_account(rng: &mut impl Rng, alphabet: &[u8]) -> AccountId { + let str_length = rng.gen_range(4, 8); + let s: Vec = (0..str_length).map(|_| *alphabet.choose(rng).unwrap()).collect(); + from_utf8(&s).unwrap().parse().unwrap() +} + +pub fn gen_unique_accounts(rng: &mut impl Rng, max_size: usize) -> Vec { + let alphabet = b"abcdefghijklmn"; + let accounts = gen_accounts_from_alphabet(rng, max_size, alphabet); + accounts.into_iter().collect::>().into_iter().collect() +} + +pub fn gen_receipts(rng: &mut impl Rng, max_size: usize) -> Vec { + let alphabet = &b"abcdefgh"[0..rng.gen_range(4, 8)]; + let accounts = gen_accounts_from_alphabet(rng, max_size, alphabet); + accounts + .iter() + .map(|account_id| Receipt { + predecessor_id: account_id.clone(), + receiver_id: account_id.clone(), + receipt_id: CryptoHash::default(), + receipt: ReceiptEnum::Data(DataReceipt { data_id: CryptoHash::default(), data: None }), + }) + .collect() +} + +fn gen_changes_helper( + rng: &mut impl Rng, + max_size: usize, + alphabet: &[u8], + max_length: u64, +) -> Vec<(Vec, Option>)> { + let mut state: HashMap, Vec> = HashMap::new(); + let mut result = Vec::new(); + let delete_probability = rng.gen_range(0.1, 0.5); + let size = rng.gen_range(0, max_size) + 1; + for _ in 0..size { + let key_length = rng.gen_range(1, max_length); + let key: Vec = (0..key_length).map(|_| *alphabet.choose(rng).unwrap()).collect(); + + let delete = rng.gen_range(0.0, 1.0) < delete_probability; + if delete { + let mut keys: Vec<_> = state.keys().cloned().collect(); + keys.push(key); + let key = keys.choose(rng).unwrap().clone(); + state.remove(&key); + result.push((key.clone(), None)); + } else { + let value_length = rng.gen_range(1, max_length); + let value: Vec = + (0..value_length).map(|_| *alphabet.choose(rng).unwrap()).collect(); + result.push((key.clone(), Some(value.clone()))); + state.insert(key, value); + } + } + result +} + +pub fn gen_changes(rng: &mut impl Rng, max_size: usize) -> Vec<(Vec, Option>)> { + let alphabet = &b"abcdefgh"[0..rng.gen_range(2, 8)]; + let max_length = rng.gen_range(2, 8); + gen_changes_helper(rng, max_size, alphabet, max_length) +} + +pub fn gen_larger_changes(rng: &mut impl Rng, max_size: usize) -> Vec<(Vec, Option>)> { + let alphabet = b"abcdefghijklmnopqrst"; + let max_length = rng.gen_range(10, 20); + gen_changes_helper(rng, max_size, alphabet, max_length) +} + +pub(crate) fn 
simplify_changes( + changes: &Vec<(Vec, Option>)>, +) -> Vec<(Vec, Option>)> { + let mut state: HashMap, Vec> = HashMap::new(); + for (key, value) in changes.iter() { + if let Some(value) = value { + state.insert(key.clone(), value.clone()); + } else { + state.remove(key); + } + } + let mut result: Vec<_> = state.into_iter().map(|(k, v)| (k, Some(v))).collect(); + result.sort(); + result +} diff --git a/mock-enclave/src/skw-vm-store/src/trie/insert_delete.rs b/mock-enclave/src/skw-vm-store/src/trie/insert_delete.rs new file mode 100644 index 0000000..b45bd8f --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/trie/insert_delete.rs @@ -0,0 +1,651 @@ +use std::collections::HashMap; + +use near_primitives::hash::{hash, CryptoHash}; + +use crate::trie::nibble_slice::NibbleSlice; +use crate::trie::{ + NodeHandle, RawTrieNode, RawTrieNodeWithSize, StorageHandle, StorageValueHandle, TrieNode, + TrieNodeWithSize, ValueHandle, +}; +use crate::{StorageError, Trie, TrieChanges}; + +pub(crate) struct NodesStorage { + nodes: Vec>, + values: Vec>>, + pub(crate) refcount_changes: HashMap, i32)>, +} + +const INVALID_STORAGE_HANDLE: &str = "invalid storage handle"; + +/// Local mutable storage that owns node objects. +impl NodesStorage { + pub fn new() -> NodesStorage { + NodesStorage { nodes: Vec::new(), refcount_changes: HashMap::new(), values: Vec::new() } + } + + fn destroy(&mut self, handle: StorageHandle) -> TrieNodeWithSize { + self.nodes + .get_mut(handle.0) + .expect(INVALID_STORAGE_HANDLE) + .take() + .expect(INVALID_STORAGE_HANDLE) + } + + pub fn node_ref(&self, handle: StorageHandle) -> &TrieNodeWithSize { + self.nodes + .get(handle.0) + .expect(INVALID_STORAGE_HANDLE) + .as_ref() + .expect(INVALID_STORAGE_HANDLE) + } + + fn node_mut(&mut self, handle: StorageHandle) -> &mut TrieNodeWithSize { + self.nodes + .get_mut(handle.0) + .expect(INVALID_STORAGE_HANDLE) + .as_mut() + .expect(INVALID_STORAGE_HANDLE) + } + + pub(crate) fn store(&mut self, node: TrieNodeWithSize) -> StorageHandle { + self.nodes.push(Some(node)); + StorageHandle(self.nodes.len() - 1) + } + + pub(crate) fn store_value(&mut self, value: Vec) -> StorageValueHandle { + self.values.push(Some(value)); + StorageValueHandle(self.values.len() - 1) + } + + pub(crate) fn value_ref(&self, handle: StorageValueHandle) -> &Vec { + self.values + .get(handle.0) + .expect(INVALID_STORAGE_HANDLE) + .as_ref() + .expect(INVALID_STORAGE_HANDLE) + } + + fn store_at(&mut self, handle: StorageHandle, node: TrieNodeWithSize) { + debug_assert!(self.nodes.get(handle.0).expect(INVALID_STORAGE_HANDLE).is_none()); + self.nodes[handle.0] = Some(node); + } +} + +enum FlattenNodesCrumb { + Entering, + AtChild(Box<[Option; 16]>, usize), + Exiting, +} + +impl Trie { + /// Allowed to mutate nodes in NodesStorage. 
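+    /// (A `StorageHandle` is an index into `NodesStorage::nodes`; `destroy`
+    /// empties the slot behind a handle and `store_at` refills it, so a handle
+    /// is only meaningful between those paired calls.)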
+ /// Insert while holding StorageHandles to NodesStorage is unsafe + pub(crate) fn insert( + &self, + memory: &mut NodesStorage, + node: StorageHandle, + partial: NibbleSlice<'_>, + value: Vec, + ) -> Result { + let root_handle = node; + let mut handle = node; + let mut value = Some(value); + let mut partial = partial; + let mut path = Vec::new(); + loop { + path.push(handle); + let TrieNodeWithSize { node, memory_usage } = memory.destroy(handle); + let children_memory_usage = memory_usage - node.memory_usage_direct(memory); + match node { + TrieNode::Empty => { + let value_handle = memory.store_value(value.take().unwrap()); + let leaf_node = TrieNode::Leaf( + partial.encoded(true).into_vec(), + ValueHandle::InMemory(value_handle), + ); + let memory_usage = leaf_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize { node: leaf_node, memory_usage }); + break; + } + TrieNode::Branch(mut children, existing_value) => { + // If the key ends here, store the value in branch's value. + if partial.is_empty() { + if let Some(value) = &existing_value { + self.delete_value(memory, value)?; + } + let value_handle = memory.store_value(value.take().unwrap()); + let new_node = + TrieNode::Branch(children, Some(ValueHandle::InMemory(value_handle))); + let new_memory_usage = + children_memory_usage + new_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(new_node, new_memory_usage)); + break; + } else { + let idx = partial.at(0) as usize; + let child = children[idx].take(); + + let child = match child { + Some(NodeHandle::Hash(hash)) => { + self.move_node_to_mutable(memory, &hash)? + } + Some(NodeHandle::InMemory(handle)) => handle, + None => memory.store(TrieNodeWithSize::empty()), + }; + children[idx] = Some(NodeHandle::InMemory(child)); + Trie::calc_memory_usage_and_store( + memory, + handle, + children_memory_usage, + TrieNode::Branch(children, existing_value), + Some(child), + ); + handle = child; + partial = partial.mid(1); + continue; + } + } + TrieNode::Leaf(key, existing_value) => { + let existing_key = NibbleSlice::from_encoded(&key).0; + let common_prefix = partial.common_prefix(&existing_key); + if common_prefix == existing_key.len() && common_prefix == partial.len() { + // Equivalent leaf. 
+ self.delete_value(memory, &existing_value)?; + let value_handle = memory.store_value(value.take().unwrap()); + let node = TrieNode::Leaf(key, ValueHandle::InMemory(value_handle)); + let memory_usage = node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize { node, memory_usage }); + break; + } else if common_prefix == 0 { + let mut children = Default::default(); + let children_memory_usage; + let branch_node = if existing_key.is_empty() { + children_memory_usage = 0; + TrieNode::Branch(children, Some(existing_value)) + } else { + let idx = existing_key.at(0) as usize; + let new_leaf = TrieNode::Leaf( + existing_key.mid(1).encoded(true).into_vec(), + existing_value, + ); + let memory_usage = new_leaf.memory_usage_direct(memory); + children_memory_usage = memory_usage; + children[idx] = Some(NodeHandle::InMemory( + memory.store(TrieNodeWithSize { node: new_leaf, memory_usage }), + )); + TrieNode::Branch(children, None) + }; + let memory_usage = + branch_node.memory_usage_direct(memory) + children_memory_usage; + memory + .store_at(handle, TrieNodeWithSize { node: branch_node, memory_usage }); + path.pop(); + continue; + } else if common_prefix == existing_key.len() { + let branch_node = + TrieNode::Branch(Default::default(), Some(existing_value)); + let memory_usage = branch_node.memory_usage_direct(memory); + let child = + memory.store(TrieNodeWithSize { node: branch_node, memory_usage }); + let new_node = TrieNode::Extension( + existing_key.encoded(false).into_vec(), + NodeHandle::InMemory(child), + ); + let memory_usage = new_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize { node: new_node, memory_usage }); + handle = child; + partial = partial.mid(common_prefix); + continue; + } else { + // Partially shared prefix: convert to leaf and call recursively to add a branch. 
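For concreteness, the partially-shared-prefix case plays out like this: inserting key "dog" (value v2) into a trie whose only node is the leaf "doge" (value v1) shares six nibbles, so the leaf is rebuilt as an extension over the shared nibbles, ending in a branch that carries v2 as its own value and a one-nibble leaf for the leftover "e". A toy model of the resulting shape (simplified: no sizes, handles, or key encoding):

#[derive(Debug)]
enum Node {
    Leaf(Vec<u8>, &'static str),               // remaining key nibbles, value
    Ext(Vec<u8>, Box<Node>),                   // shared key nibbles, single child
    Branch([Option<Box<Node>>; 16], Option<&'static str>),
}

fn main() {
    // "dog" is nibbles [6,4, 6,15, 6,7]; "doge" continues with [6,5].
    let mut slots: [Option<Box<Node>>; 16] = Default::default();
    slots[6] = Some(Box::new(Node::Leaf(vec![5], "v1"))); // leftover of "doge"
    let after = Node::Ext(
        vec![6, 4, 6, 15, 6, 7],                    // shared "dog" nibbles
        Box::new(Node::Branch(slots, Some("v2"))),  // "dog" itself ends here
    );
    println!("{:?}", after);
}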
+ let leaf_node = TrieNode::Leaf( + existing_key.mid(common_prefix).encoded(true).into_vec(), + existing_value, + ); + let leaf_memory_usage = leaf_node.memory_usage_direct(memory); + let child = + memory.store(TrieNodeWithSize::new(leaf_node, leaf_memory_usage)); + let node = TrieNode::Extension( + partial.encoded_leftmost(common_prefix, false).into_vec(), + NodeHandle::InMemory(child), + ); + let mem = node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(node, mem)); + handle = child; + partial = partial.mid(common_prefix); + continue; + } + } + TrieNode::Extension(key, child) => { + let existing_key = NibbleSlice::from_encoded(&key).0; + let common_prefix = partial.common_prefix(&existing_key); + if common_prefix == 0 { + let idx = existing_key.at(0) as usize; + let mut children: Box<[Option; 16]> = Default::default(); + let child_memory_usage; + children[idx] = if existing_key.len() == 1 { + child_memory_usage = children_memory_usage; + Some(child) + } else { + let child = TrieNode::Extension( + existing_key.mid(1).encoded(false).into_vec(), + child, + ); + + child_memory_usage = + children_memory_usage + child.memory_usage_direct(memory); + Some(NodeHandle::InMemory( + memory.store(TrieNodeWithSize::new(child, child_memory_usage)), + )) + }; + let branch_node = TrieNode::Branch(children, None); + let memory_usage = + branch_node.memory_usage_direct(memory) + child_memory_usage; + memory.store_at(handle, TrieNodeWithSize::new(branch_node, memory_usage)); + path.pop(); + continue; + } else if common_prefix == existing_key.len() { + let child = match child { + NodeHandle::Hash(hash) => self.move_node_to_mutable(memory, &hash)?, + NodeHandle::InMemory(handle) => handle, + }; + let node = TrieNode::Extension(key, NodeHandle::InMemory(child)); + let memory_usage = node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(node, memory_usage)); + handle = child; + partial = partial.mid(common_prefix); + continue; + } else { + // Partially shared prefix: covert to shorter extension and recursively add a branch. + let child_node = TrieNode::Extension( + existing_key.mid(common_prefix).encoded(false).into_vec(), + child, + ); + let child_memory_usage = + children_memory_usage + child_node.memory_usage_direct(memory); + let child = + memory.store(TrieNodeWithSize::new(child_node, child_memory_usage)); + let node = TrieNode::Extension( + existing_key.encoded_leftmost(common_prefix, false).into_vec(), + NodeHandle::InMemory(child), + ); + let memory_usage = node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(node, memory_usage)); + handle = child; + partial = partial.mid(common_prefix); + continue; + } + } + } + } + for i in (0..path.len() - 1).rev() { + let node = path.get(i).unwrap(); + let child = path.get(i + 1).unwrap(); + let child_memory_usage = memory.node_ref(*child).memory_usage; + memory.node_mut(*node).memory_usage += child_memory_usage; + } + Ok(root_handle) + } + + /// On insert/delete, we want to recompute subtree sizes without touching nodes that aren't on + /// the path of the key inserted/deleted. This is relevant because reducing storage reads + /// saves time and makes fraud proofs smaller. + /// + /// Memory usage is recalculated in two steps: + /// 1. go down the trie, modify the node and subtract the next child on the path from memory usage + /// 2. 
go up the path and add new child's memory usage + fn calc_memory_usage_and_store( + memory: &mut NodesStorage, + handle: StorageHandle, + children_memory_usage: u64, + new_node: TrieNode, + old_child: Option, + ) { + let new_memory_usage = children_memory_usage + new_node.memory_usage_direct(memory) + - old_child.map(|child| memory.node_ref(child).memory_usage()).unwrap_or_default(); + memory.store_at(handle, TrieNodeWithSize::new(new_node, new_memory_usage)); + } + + /// Deletes a node from the trie which has key = `partial` given root node. + /// Returns (new root node or `None` if this was the node to delete, was it updated). + /// While deleting keeps track of all the removed / updated nodes in `death_row`. + pub(crate) fn delete( + &self, + memory: &mut NodesStorage, + node: StorageHandle, + partial: NibbleSlice<'_>, + ) -> Result { + let mut handle = node; + let mut partial = partial; + let root_node = handle; + let mut path: Vec = Vec::new(); + loop { + path.push(handle); + let TrieNodeWithSize { node, memory_usage } = memory.destroy(handle); + let children_memory_usage = memory_usage - node.memory_usage_direct(memory); + match node { + TrieNode::Empty => { + memory.store_at(handle, TrieNodeWithSize::empty()); + break; + } + TrieNode::Leaf(key, value) => { + if NibbleSlice::from_encoded(&key).0 == partial { + self.delete_value(memory, &value)?; + memory.store_at(handle, TrieNodeWithSize::empty()); + break; + } else { + let leaf_node = TrieNode::Leaf(key, value); + let memory_usage = leaf_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(leaf_node, memory_usage)); + break; + } + } + TrieNode::Branch(mut children, value) => { + if partial.is_empty() { + if let Some(value) = &value { + self.delete_value(memory, value)?; + } else { + } + if children.iter().filter(|&x| x.is_some()).count() == 0 { + memory.store_at(handle, TrieNodeWithSize::empty()); + break; + } else { + Trie::calc_memory_usage_and_store( + memory, + handle, + children_memory_usage, + TrieNode::Branch(children, None), + None, + ); + break; + } + } else { + let idx = partial.at(0) as usize; + if let Some(node_or_hash) = children[idx].take() { + let child = match node_or_hash { + NodeHandle::Hash(hash) => { + self.move_node_to_mutable(memory, &hash)? 
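To see what those memory numbers are made of: with the costs declared later in `trie/mod.rs` (`node_cost = 50`, `byte_of_key = 2`, `byte_of_value = 1`), each node's `memory_usage` is its direct cost plus the sum of its children's, which is exactly the invariant the two-step walk maintains. A worked sketch of the arithmetic (constants copied from the patch, function names illustrative):

const NODE_COST: u64 = 50;
const BYTE_OF_KEY: u64 = 2;
const BYTE_OF_VALUE: u64 = 1;

// Mirrors `memory_usage_for_value_length`: a flat node_cost plus one unit per value byte.
fn value_cost(value_len: u64) -> u64 {
    value_len * BYTE_OF_VALUE + NODE_COST
}

// Mirrors the Leaf arm of `memory_usage_direct`.
fn leaf_cost(key_len: u64, value_len: u64) -> u64 {
    NODE_COST + key_len * BYTE_OF_KEY + value_cost(value_len)
}

fn main() {
    // Leaf with a 3-byte encoded key and a 5-byte value:
    assert_eq!(leaf_cost(3, 5), 50 + 6 + (5 + 50)); // = 111
    // A parent extension with a 2-byte key over that leaf adds only its direct cost:
    let ext_direct = NODE_COST + 2 * BYTE_OF_KEY; // 54
    assert_eq!(ext_direct + leaf_cost(3, 5), 165);
}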
+ } + NodeHandle::InMemory(node) => node, + }; + children[idx] = Some(NodeHandle::InMemory(child)); + Trie::calc_memory_usage_and_store( + memory, + handle, + children_memory_usage, + TrieNode::Branch(children, value), + Some(child), + ); + handle = child; + partial = partial.mid(1); + continue; + } else { + memory.store_at( + handle, + TrieNodeWithSize::new( + TrieNode::Branch(children, value), + memory_usage, + ), + ); + break; + } + } + } + TrieNode::Extension(key, child) => { + let (common_prefix, existing_len) = { + let existing_key = NibbleSlice::from_encoded(&key).0; + (existing_key.common_prefix(&partial), existing_key.len()) + }; + if common_prefix == existing_len { + let child = match child { + NodeHandle::Hash(hash) => self.move_node_to_mutable(memory, &hash)?, + NodeHandle::InMemory(node) => node, + }; + Trie::calc_memory_usage_and_store( + memory, + handle, + children_memory_usage, + TrieNode::Extension(key, NodeHandle::InMemory(child)), + Some(child), + ); + partial = partial.mid(existing_len); + handle = child; + continue; + } else { + memory.store_at( + handle, + TrieNodeWithSize::new(TrieNode::Extension(key, child), memory_usage), + ); + break; + } + } + } + } + self.fix_nodes(memory, path)?; + Ok(root_node) + } + + fn fix_nodes( + &self, + memory: &mut NodesStorage, + path: Vec, + ) -> Result<(), StorageError> { + let mut child_memory_usage = 0; + for handle in path.into_iter().rev() { + let TrieNodeWithSize { node, memory_usage } = memory.destroy(handle); + let memory_usage = memory_usage + child_memory_usage; + match node { + TrieNode::Empty => { + memory.store_at(handle, TrieNodeWithSize::empty()); + } + TrieNode::Leaf(key, value) => { + memory.store_at( + handle, + TrieNodeWithSize::new(TrieNode::Leaf(key, value), memory_usage), + ); + } + TrieNode::Branch(mut children, value) => { + children.iter_mut().for_each(|child| { + if let Some(NodeHandle::InMemory(h)) = child { + if let TrieNode::Empty = memory.node_ref(*h).node { + *child = None + } + } + }); + let num_children = children.iter().filter(|&x| x.is_some()).count(); + if num_children == 0 { + if let Some(value) = value { + let empty = NibbleSlice::new(&[]).encoded(true).into_vec(); + let leaf_node = TrieNode::Leaf(empty, value); + let memory_usage = leaf_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(leaf_node, memory_usage)); + } else { + memory.store_at(handle, TrieNodeWithSize::empty()); + } + } else if num_children == 1 && value.is_none() { + // Branch with one child becomes extension + // Extension followed by leaf becomes leaf + // Extension followed by extension becomes extension + let idx = + children.iter().enumerate().find(|(_i, x)| x.is_some()).unwrap().0; + let key = NibbleSlice::new(&[(idx << 4) as u8]) + .encoded_leftmost(1, false) + .into_vec(); + self.fix_extension_node( + memory, + handle, + key, + children[idx].take().unwrap(), + )?; + } else { + memory.store_at( + handle, + TrieNodeWithSize::new(TrieNode::Branch(children, value), memory_usage), + ); + } + } + TrieNode::Extension(key, child) => { + self.fix_extension_node(memory, handle, key, child)?; + } + } + child_memory_usage = memory.node_ref(handle).memory_usage; + } + Ok(()) + } + + fn fix_extension_node( + &self, + memory: &mut NodesStorage, + handle: StorageHandle, + key: Vec, + child: NodeHandle, + ) -> Result<(), StorageError> { + let child = match child { + NodeHandle::Hash(hash) => self.move_node_to_mutable(memory, &hash)?, + NodeHandle::InMemory(h) => h, + }; + let TrieNodeWithSize { node, 
memory_usage } = memory.destroy(child); + let child_child_memory_usage = memory_usage - node.memory_usage_direct(memory); + match node { + TrieNode::Empty => { + memory.store_at(handle, TrieNodeWithSize::empty()); + } + TrieNode::Leaf(child_key, value) => { + let key = NibbleSlice::from_encoded(&key) + .0 + .merge_encoded(&NibbleSlice::from_encoded(&child_key).0, true) + .into_vec(); + let new_node = TrieNode::Leaf(key, value); + let memory_usage = new_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(new_node, memory_usage)); + } + TrieNode::Branch(children, value) => { + memory.store_at( + child, + TrieNodeWithSize::new(TrieNode::Branch(children, value), memory_usage), + ); + let new_node = TrieNode::Extension(key, NodeHandle::InMemory(child)); + let memory_usage = memory_usage + new_node.memory_usage_direct(memory); + memory.store_at(handle, TrieNodeWithSize::new(new_node, memory_usage)); + } + TrieNode::Extension(child_key, child_child) => { + let key = NibbleSlice::from_encoded(&key) + .0 + .merge_encoded(&NibbleSlice::from_encoded(&child_key).0, false) + .into_vec(); + let new_node = TrieNode::Extension(key, child_child); + let memory_usage = new_node.memory_usage_direct(memory) + child_child_memory_usage; + memory.store_at(handle, TrieNodeWithSize::new(new_node, memory_usage)); + } + } + Ok(()) + } + + pub(crate) fn flatten_nodes( + old_root: &CryptoHash, + memory: NodesStorage, + node: StorageHandle, + ) -> Result { + let mut stack: Vec<(StorageHandle, FlattenNodesCrumb)> = Vec::new(); + stack.push((node, FlattenNodesCrumb::Entering)); + let mut last_hash = CryptoHash::default(); + let mut buffer: Vec = Vec::new(); + let mut memory = memory; + while let Some((node, position)) = stack.pop() { + let node_with_size = memory.node_ref(node); + let memory_usage = node_with_size.memory_usage; + let raw_node = match &node_with_size.node { + TrieNode::Empty => { + last_hash = Trie::empty_root(); + continue; + } + TrieNode::Branch(children, value) => match position { + FlattenNodesCrumb::Entering => { + let new_children: [Option; 16] = Default::default(); + stack.push((node, FlattenNodesCrumb::AtChild(Box::new(new_children), 0))); + continue; + } + FlattenNodesCrumb::AtChild(mut new_children, mut i) => { + if i > 0 && children[i - 1].is_some() { + new_children[i - 1] = Some(last_hash); + } + while i < 16 { + match children[i].as_ref() { + Some(NodeHandle::InMemory(_)) => { + break; + } + Some(NodeHandle::Hash(hash)) => { + new_children[i] = Some(*hash); + } + None => {} + } + i += 1; + } + if i < 16 { + match children[i].as_ref() { + Some(NodeHandle::InMemory(child_node)) => { + stack.push(( + node, + FlattenNodesCrumb::AtChild(new_children, i + 1), + )); + stack.push((*child_node, FlattenNodesCrumb::Entering)); + continue; + } + _ => unreachable!(), + } + } + let new_value = + value.clone().map(|value| Trie::flatten_value(&mut memory, value)); + + RawTrieNode::Branch(*new_children, new_value) + } + FlattenNodesCrumb::Exiting => unreachable!(), + }, + TrieNode::Extension(key, child) => match position { + FlattenNodesCrumb::Entering => match child { + NodeHandle::InMemory(child) => { + stack.push((node, FlattenNodesCrumb::Exiting)); + stack.push((*child, FlattenNodesCrumb::Entering)); + continue; + } + NodeHandle::Hash(hash) => RawTrieNode::Extension(key.clone(), *hash), + }, + FlattenNodesCrumb::Exiting => RawTrieNode::Extension(key.clone(), last_hash), + _ => unreachable!(), + }, + TrieNode::Leaf(key, value) => { + let key = key.clone(); + let value = 
value.clone(); + let (value_length, value_hash) = Trie::flatten_value(&mut memory, value); + RawTrieNode::Leaf(key, value_length, value_hash) + } + }; + let raw_node_with_size = RawTrieNodeWithSize { node: raw_node, memory_usage }; + raw_node_with_size.encode_into(&mut buffer).expect("Encode can never fail"); + let key = hash(&buffer); + + let (_value, rc) = + memory.refcount_changes.entry(key).or_insert_with(|| (buffer.clone(), 0)); + *rc += 1; + buffer.clear(); + last_hash = key; + } + let (insertions, deletions) = + Trie::convert_to_insertions_and_deletions(memory.refcount_changes); + Ok(TrieChanges { old_root: *old_root, new_root: last_hash, insertions, deletions }) + } + + fn flatten_value(memory: &mut NodesStorage, value: ValueHandle) -> (u32, CryptoHash) { + match value { + ValueHandle::InMemory(value_handle) => { + let value = memory.value_ref(value_handle).clone(); + let value_length = value.len() as u32; + let value_hash = hash(&value); + let (_value, rc) = + memory.refcount_changes.entry(value_hash).or_insert_with(|| (value, 0)); + *rc += 1; + (value_length, value_hash) + } + ValueHandle::HashAndSize(value_length, value_hash) => (value_length, value_hash), + } + } +} diff --git a/mock-enclave/src/skw-vm-store/src/trie/iterator.rs b/mock-enclave/src/skw-vm-store/src/trie/iterator.rs new file mode 100644 index 0000000..1750b00 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/trie/iterator.rs @@ -0,0 +1,480 @@ +use near_primitives::hash::CryptoHash; + +use crate::trie::nibble_slice::NibbleSlice; +use crate::trie::{TrieNode, TrieNodeWithSize, ValueHandle}; +use crate::{StorageError, Trie}; + +#[derive(Debug)] +struct Crumb { + node: TrieNodeWithSize, + status: CrumbStatus, +} + +#[derive(Clone, Eq, PartialEq, Debug)] +pub(crate) enum CrumbStatus { + Entering, + At, + AtChild(usize), + Exiting, +} + +impl Crumb { + fn increment(&mut self) { + self.status = match (&self.status, &self.node.node) { + (_, &TrieNode::Empty) => CrumbStatus::Exiting, + (&CrumbStatus::Entering, _) => CrumbStatus::At, + (&CrumbStatus::At, &TrieNode::Branch(_, _)) => CrumbStatus::AtChild(0), + (&CrumbStatus::AtChild(x), &TrieNode::Branch(_, _)) if x < 15 => { + CrumbStatus::AtChild(x + 1) + } + _ => CrumbStatus::Exiting, + } + } +} + +pub struct TrieIterator<'a> { + trie: &'a Trie, + trail: Vec, + pub(crate) key_nibbles: Vec, + root: CryptoHash, +} + +pub type TrieItem = (Vec, Vec); + +/// Item extracted from Trie during depth first traversal, corresponding to some Trie node. +pub struct TrieTraversalItem { + /// Hash of the node. + pub hash: CryptoHash, + /// Key of the node if it stores a value. + pub key: Option>, +} + +impl<'a> TrieIterator<'a> { + #![allow(clippy::new_ret_no_self)] + /// Create a new iterator. + pub fn new(trie: &'a Trie, root: &CryptoHash) -> Result { + let mut r = TrieIterator { + trie, + trail: Vec::with_capacity(8), + key_nibbles: Vec::with_capacity(64), + root: *root, + }; + let node = trie.retrieve_node(root)?; + r.descend_into_node(node); + Ok(r) + } + + /// Position the iterator on the first element with key => `key`. 
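(In that doc comment, `=>` should read `>=`: `seek` leaves the iterator on the first key greater than or equal to the argument.) Usage then looks like the tests later in this patch; a sketch against the patch's own API, assuming the surrounding module's imports and an already-populated trie:

fn first_two_at_or_after(
    trie: &Trie,
    state_root: &CryptoHash,
    start: &[u8],
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError> {
    let mut iter = trie.iter(state_root)?;
    // Position on the first key >= `start`; iteration stays in lexicographic order.
    iter.seek(start)?;
    iter.take(2).collect()
}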
+    pub fn seek<K: AsRef<[u8]>>(&mut self, key: K) -> Result<(), StorageError> {
+        self.seek_nibble_slice(NibbleSlice::new(key.as_ref())).map(drop)
+    }
+
+    /// Returns the hash of the last node
+    pub(crate) fn seek_nibble_slice(
+        &mut self,
+        mut key: NibbleSlice<'_>,
+    ) -> Result<CryptoHash, StorageError> {
+        self.trail.clear();
+        self.key_nibbles.clear();
+        let mut hash = self.root;
+        loop {
+            let node = self.trie.retrieve_node(&hash)?;
+            self.trail.push(Crumb { status: CrumbStatus::Entering, node });
+            let Crumb { status, node } = self.trail.last_mut().unwrap();
+            match &node.node {
+                TrieNode::Empty => break,
+                TrieNode::Leaf(leaf_key, _) => {
+                    let existing_key = NibbleSlice::from_encoded(leaf_key).0;
+                    if existing_key < key {
+                        self.key_nibbles.extend(existing_key.iter());
+                        *status = CrumbStatus::Exiting;
+                    }
+                    break;
+                }
+                TrieNode::Branch(children, _) => {
+                    if key.is_empty() {
+                        break;
+                    } else {
+                        let idx = key.at(0) as usize;
+                        self.key_nibbles.push(key.at(0));
+                        *status = CrumbStatus::AtChild(idx);
+                        if let Some(child) = &children[idx] {
+                            hash = *child.unwrap_hash();
+                            key = key.mid(1);
+                        } else {
+                            break;
+                        }
+                    }
+                }
+                TrieNode::Extension(ext_key, child) => {
+                    let existing_key = NibbleSlice::from_encoded(ext_key).0;
+                    if key.starts_with(&existing_key) {
+                        key = key.mid(existing_key.len());
+                        hash = *child.unwrap_hash();
+                        *status = CrumbStatus::At;
+                        self.key_nibbles.extend(existing_key.iter());
+                    } else {
+                        if existing_key < key {
+                            *status = CrumbStatus::Exiting;
+                            self.key_nibbles.extend(existing_key.iter());
+                        }
+                        break;
+                    }
+                }
+            }
+        }
+        Ok(hash)
+    }
+
+    fn descend_into_node(&mut self, node: TrieNodeWithSize) {
+        self.trail.push(Crumb { status: CrumbStatus::Entering, node });
+    }
+
+    fn key(&self) -> Vec<u8> {
+        let mut result = <Vec<u8>>::with_capacity(self.key_nibbles.len() / 2);
+        for i in (1..self.key_nibbles.len()).step_by(2) {
+            result.push(self.key_nibbles[i - 1] * 16 + self.key_nibbles[i]);
+        }
+        result
+    }
+
+    fn has_value(&self) -> bool {
+        match self.trail.last() {
+            Some(b) => match (&b.status, &b.node.node) {
+                (CrumbStatus::At, TrieNode::Branch(_, Some(_))) => true,
+                (CrumbStatus::At, TrieNode::Leaf(_, _)) => true,
+                _ => false,
+            },
+            None => false, // Trail finished
+        }
+    }
+
+    fn iter_step(&mut self) -> Option<IterStep> {
+        self.trail.last_mut()?.increment();
+        let b = self.trail.last().expect("Trail finished.");
+        match (b.status.clone(), &b.node.node) {
+            (CrumbStatus::Exiting, n) => {
+                match n {
+                    TrieNode::Leaf(ref key, _) | TrieNode::Extension(ref key, _) => {
+                        let existing_key = NibbleSlice::from_encoded(key).0;
+                        let l = self.key_nibbles.len();
+                        self.key_nibbles.truncate(l - existing_key.len());
+                    }
+                    TrieNode::Branch(_, _) => {
+                        self.key_nibbles.pop();
+                    }
+                    _ => {}
+                }
+                Some(IterStep::PopTrail)
+            }
+            (CrumbStatus::At, TrieNode::Branch(_, Some(value))) => {
+                let hash = match value {
+                    ValueHandle::HashAndSize(_, hash) => *hash,
+                    ValueHandle::InMemory(_node) => unreachable!(),
+                };
+                Some(IterStep::Value(hash))
+            }
+            (CrumbStatus::At, TrieNode::Branch(_, None)) => Some(IterStep::Continue),
+            (CrumbStatus::At, TrieNode::Leaf(key, value)) => {
+                let hash = match value {
+                    ValueHandle::HashAndSize(_, hash) => *hash,
+                    ValueHandle::InMemory(_node) => unreachable!(),
+                };
+                let key = NibbleSlice::from_encoded(key).0;
+                self.key_nibbles.extend(key.iter());
+                Some(IterStep::Value(hash))
+            }
+            (CrumbStatus::At, TrieNode::Extension(key, child)) => {
+                let hash = *child.unwrap_hash();
+                let key = NibbleSlice::from_encoded(key).0;
+                self.key_nibbles.extend(key.iter());
+                Some(IterStep::Descend(hash))
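`key()` above is the inverse of nibble expansion: it packs the accumulated nibbles back into bytes two at a time, which is why it walks odd indices. A freestanding mirror (assuming, as `key()` does at that point, a whole number of nibble pairs):

fn pack_nibbles(nibbles: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(nibbles.len() / 2);
    for pair in nibbles.chunks_exact(2) {
        // Each pair is (high nibble, low nibble) of one byte.
        out.push(pair[0] * 16 + pair[1]);
    }
    out
}

fn main() {
    // [6,8, 6,9] are the nibbles of b"hi" (0x68, 0x69).
    assert_eq!(pack_nibbles(&[6, 8, 6, 9]), b"hi".to_vec());
}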
+ } + (CrumbStatus::AtChild(i), TrieNode::Branch(children, _)) if children[i].is_some() => { + match i { + 0 => self.key_nibbles.push(0), + i => *self.key_nibbles.last_mut().expect("Pushed child value before") = i as u8, + } + let hash = *children[i].as_ref().unwrap().unwrap_hash(); + Some(IterStep::Descend(hash)) + } + (CrumbStatus::AtChild(i), TrieNode::Branch(_, _)) => { + if i == 0 { + self.key_nibbles.push(0); + } + Some(IterStep::Continue) + } + _ => panic!("Should never see Entering or AtChild without a Branch here."), + } + } + + fn common_prefix(str1: &[u8], str2: &[u8]) -> usize { + let mut prefix = 0; + while prefix < str1.len() && prefix < str2.len() && str1[prefix] == str2[prefix] { + prefix += 1; + } + prefix + } + + /// Note that path_begin and path_end are not bytes, they are nibbles + /// Visits all nodes belonging to the interval [path_begin, path_end) in depth-first search + /// order and return key-value pairs for each visited node with value stored + /// Used to generate split states for re-sharding + pub(crate) fn get_trie_items( + &mut self, + path_begin: &[u8], + path_end: &[u8], + ) -> Result, StorageError> { + let path_begin_encoded = NibbleSlice::encode_nibbles(path_begin, false); + self.seek_nibble_slice(NibbleSlice::from_encoded(&path_begin_encoded).0)?; + + let mut trie_items = vec![]; + for item in self { + let trie_item = item?; + let key_encoded: Vec<_> = NibbleSlice::new(&trie_item.0).iter().collect(); + if &key_encoded[..] >= path_end { + return Ok(trie_items); + } + trie_items.push(trie_item); + } + Ok(trie_items) + } + + /// Visits all nodes belonging to the interval [path_begin, path_end) in depth-first search + /// order and return TrieTraversalItem for each visited node. + /// Used to generate and apply state parts for state sync. + pub(crate) fn visit_nodes_interval( + &mut self, + path_begin: &[u8], + path_end: &[u8], + ) -> Result, StorageError> { + let path_begin_encoded = NibbleSlice::encode_nibbles(path_begin, true); + let last_hash = self.seek_nibble_slice(NibbleSlice::from_encoded(&path_begin_encoded).0)?; + let mut prefix = Self::common_prefix(path_end, &self.key_nibbles); + if self.key_nibbles[prefix..] >= path_end[prefix..] { + return Ok(vec![]); + } + let mut nodes_list = Vec::new(); + + // Actually (self.key_nibbles[..] == path_begin) always because path_begin always ends in a node + if &self.key_nibbles[..] >= path_begin { + nodes_list.push(TrieTraversalItem { + hash: last_hash, + key: self.has_value().then(|| self.key()), + }); + } + + loop { + let iter_step = match self.iter_step() { + Some(iter_step) => iter_step, + None => break, + }; + match iter_step { + IterStep::PopTrail => { + self.trail.pop(); + prefix = std::cmp::min(self.key_nibbles.len(), prefix); + } + IterStep::Descend(hash) => { + prefix += Self::common_prefix(&path_end[prefix..], &self.key_nibbles[prefix..]); + if self.key_nibbles[prefix..] >= path_end[prefix..] 
{ + break; + } + let node = self.trie.retrieve_node(&hash)?; + self.descend_into_node(node); + nodes_list.push(TrieTraversalItem { hash, key: None }); + } + IterStep::Continue => {} + IterStep::Value(hash) => { + self.trie.retrieve_raw_bytes(&hash)?; + nodes_list.push(TrieTraversalItem { + hash, + key: self.has_value().then(|| self.key()), + }); + } + } + } + Ok(nodes_list) + } +} + +enum IterStep { + Continue, + PopTrail, + Descend(CryptoHash), + Value(CryptoHash), +} + +impl<'a> Iterator for TrieIterator<'a> { + type Item = Result; + + fn next(&mut self) -> Option { + loop { + let iter_step = self.iter_step()?; + match iter_step { + IterStep::PopTrail => { + self.trail.pop(); + } + IterStep::Descend(hash) => match self.trie.retrieve_node(&hash) { + Ok(node) => self.descend_into_node(node), + Err(e) => return Some(Err(e)), + }, + IterStep::Continue => {} + IterStep::Value(hash) => { + return Some( + self.trie.retrieve_raw_bytes(&hash).map(|value| (self.key(), value)), + ) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use rand::seq::SliceRandom; + use rand::Rng; + + use near_primitives::hash::CryptoHash; + + use crate::test_utils::{ + create_tries, create_tries_complex, gen_changes, simplify_changes, test_populate_trie, + }; + use crate::trie::iterator::IterStep; + use crate::trie::nibble_slice::NibbleSlice; + use crate::Trie; + use near_primitives::shard_layout::ShardUId; + + #[test] + fn test_iterator() { + let mut rng = rand::thread_rng(); + for _ in 0..100 { + let tries = create_tries_complex(1, 2); + let shard_uid = ShardUId { version: 1, shard_id: 0 }; + let trie = tries.get_trie_for_shard(shard_uid); + let trie_changes = gen_changes(&mut rng, 10); + let trie_changes = simplify_changes(&trie_changes); + + let mut map = BTreeMap::new(); + for (key, value) in trie_changes.iter() { + if let Some(value) = value { + map.insert(key.clone(), value.clone()); + } + } + let state_root = + test_populate_trie(&tries, &Trie::empty_root(), shard_uid, trie_changes.clone()); + + { + let result1: Vec<_> = trie.iter(&state_root).unwrap().map(Result::unwrap).collect(); + let result2: Vec<_> = map.iter().map(|(k, v)| (k.clone(), v.clone())).collect(); + assert_eq!(result1, result2); + } + test_seek(&trie, &map, &state_root, &[]); + + let empty_vec = vec![]; + let max_key = map.keys().max().unwrap_or(&empty_vec); + let min_key = map.keys().min().unwrap_or(&empty_vec); + test_get_trie_items(&trie, &map, &state_root, &[], &[]); + test_get_trie_items(&trie, &map, &state_root, min_key, max_key); + for (seek_key, _) in trie_changes.iter() { + test_seek(&trie, &map, &state_root, seek_key); + test_get_trie_items(&trie, &map, &state_root, min_key, seek_key); + test_get_trie_items(&trie, &map, &state_root, seek_key, max_key); + } + for _ in 0..20 { + let alphabet = &b"abcdefgh"[0..rng.gen_range(2, 8)]; + let key_length = rng.gen_range(1, 8); + let seek_key: Vec = + (0..key_length).map(|_| *alphabet.choose(&mut rng).unwrap()).collect(); + test_seek(&trie, &map, &state_root, &seek_key); + + let seek_key2: Vec = + (0..key_length).map(|_| *alphabet.choose(&mut rng).unwrap()).collect(); + let path_begin = seek_key.clone().min(seek_key2.clone()); + let path_end = seek_key.clone().max(seek_key2.clone()); + test_get_trie_items(&trie, &map, &state_root, &path_begin, &path_end); + } + } + } + + fn test_get_trie_items( + trie: &Trie, + map: &BTreeMap, Vec>, + state_root: &CryptoHash, + path_begin: &[u8], + path_end: &[u8], + ) { + let path_begin_nibbles: Vec<_> = 
NibbleSlice::new(path_begin).iter().collect(); + let path_end_nibbles: Vec<_> = NibbleSlice::new(path_end).iter().collect(); + let result1 = trie + .iter(state_root) + .unwrap() + .get_trie_items(&path_begin_nibbles, &path_end_nibbles) + .unwrap(); + let result2: Vec<_> = map + .range(path_begin.to_vec()..path_end.to_vec()) + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); + assert_eq!(result1, result2); + + // test when path_end ends in [16] + let result1 = + trie.iter(state_root).unwrap().get_trie_items(&path_begin_nibbles, &[16u8]).unwrap(); + let result2: Vec<_> = + map.range(path_begin.to_vec()..).map(|(k, v)| (k.clone(), v.clone())).collect(); + assert_eq!(result1, result2); + } + + fn test_seek( + trie: &Trie, + map: &BTreeMap, Vec>, + state_root: &CryptoHash, + seek_key: &[u8], + ) { + let mut iterator = trie.iter(state_root).unwrap(); + iterator.seek(&seek_key).unwrap(); + let result1: Vec<_> = iterator.map(Result::unwrap).take(5).collect(); + let result2: Vec<_> = + map.range(seek_key.to_vec()..).map(|(k, v)| (k.clone(), v.clone())).take(5).collect(); + assert_eq!(result1, result2); + } + + #[test] + fn test_has_value() { + let mut rng = rand::thread_rng(); + for _ in 0..100 { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let trie_changes = gen_changes(&mut rng, 10); + let trie_changes = simplify_changes(&trie_changes); + let state_root = test_populate_trie( + &tries, + &Trie::empty_root(), + ShardUId::single_shard(), + trie_changes.clone(), + ); + let mut iterator = trie.iter(&state_root).unwrap(); + loop { + let iter_step = match iterator.iter_step() { + Some(iter_step) => iter_step, + None => break, + }; + match iter_step { + IterStep::Value(_) => assert!(iterator.has_value()), + _ => assert!(!iterator.has_value()), + } + match iter_step { + IterStep::PopTrail => { + iterator.trail.pop(); + } + IterStep::Descend(hash) => match iterator.trie.retrieve_node(&hash) { + Ok(node) => iterator.descend_into_node(node), + Err(e) => panic!("Unexpected error: {}", e), + }, + _ => {} + } + } + } + } +} diff --git a/mock-enclave/src/skw-vm-store/src/trie/mod.rs b/mock-enclave/src/skw-vm-store/src/trie/mod.rs new file mode 100644 index 0000000..5936f82 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/trie/mod.rs @@ -0,0 +1,1175 @@ +use std::cell::RefCell; +use std::cmp::Ordering; +use std::collections::HashMap; +use std::fmt; +use std::io::{Cursor, Read, Write}; +use std::sync::Arc; + +use borsh::{BorshDeserialize, BorshSerialize}; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; + +use near_primitives::challenge::PartialState; +use near_primitives::contract::ContractCode; +use near_primitives::hash::{hash, CryptoHash}; +pub use near_primitives::shard_layout::ShardUId; +use near_primitives::types::{StateRoot, StateRootNode}; + +use crate::trie::insert_delete::NodesStorage; +use crate::trie::iterator::TrieIterator; +use crate::trie::nibble_slice::NibbleSlice; +pub use crate::trie::shard_tries::{KeyForStateChanges, ShardTries, WrappedTrieChanges}; +use crate::trie::trie_storage::{ + TouchedNodesCounter, TrieMemoryPartialStorage, TrieRecordingStorage, TrieStorage, +}; +pub(crate) use crate::trie::trie_storage::{TrieCache, TrieCachingStorage}; +use crate::StorageError; + +mod insert_delete; +pub mod iterator; +mod nibble_slice; +mod shard_tries; +pub mod split_state; +mod state_parts; +mod trie_storage; +pub mod update; + +#[cfg(test)] +mod trie_tests; + +const POISONED_LOCK_ERR: &str = "The lock was poisoned."; + +/// For 
fraud proofs +#[derive(Debug, Clone)] +pub struct PartialStorage { + pub nodes: PartialState, +} + +#[derive(Clone, Hash, Debug, Copy)] +pub(crate) struct StorageHandle(usize); + +#[derive(Clone, Hash, Debug, Copy)] +pub(crate) struct StorageValueHandle(usize); + +pub struct TrieCosts { + pub byte_of_key: u64, + pub byte_of_value: u64, + pub node_cost: u64, +} + +const TRIE_COSTS: TrieCosts = TrieCosts { byte_of_key: 2, byte_of_value: 1, node_cost: 50 }; + +#[derive(Clone, Hash, Debug)] +enum NodeHandle { + InMemory(StorageHandle), + Hash(CryptoHash), +} + +impl NodeHandle { + fn unwrap_hash(&self) -> &CryptoHash { + match self { + Self::Hash(hash) => hash, + Self::InMemory(_) => unreachable!(), + } + } +} + +#[derive(Clone, Hash, Debug)] +enum ValueHandle { + InMemory(StorageValueHandle), + HashAndSize(u32, CryptoHash), +} + +#[derive(Clone, Hash, Debug)] +enum TrieNode { + /// Null trie node. Could be an empty root or an empty branch entry. + Empty, + /// Key and value of the leaf node. + Leaf(Vec, ValueHandle), + /// Branch of 16 possible children and value if key ends here. + Branch(Box<[Option; 16]>, Option), + /// Key and child of extension. + Extension(Vec, NodeHandle), +} + +#[derive(Clone, Debug)] +pub struct TrieNodeWithSize { + node: TrieNode, + pub memory_usage: u64, +} + +impl TrieNodeWithSize { + fn from_raw(rc_node: RawTrieNodeWithSize) -> TrieNodeWithSize { + TrieNodeWithSize { node: TrieNode::new(rc_node.node), memory_usage: rc_node.memory_usage } + } + + fn new(node: TrieNode, memory_usage: u64) -> TrieNodeWithSize { + TrieNodeWithSize { node, memory_usage } + } + + fn memory_usage(&self) -> u64 { + self.memory_usage + } + + fn empty() -> TrieNodeWithSize { + TrieNodeWithSize { node: TrieNode::Empty, memory_usage: 0 } + } +} + +impl TrieNode { + fn new(rc_node: RawTrieNode) -> TrieNode { + match rc_node { + RawTrieNode::Leaf(key, value_length, value_hash) => { + TrieNode::Leaf(key, ValueHandle::HashAndSize(value_length, value_hash)) + } + RawTrieNode::Branch(children, value) => { + let mut new_children: Box<[Option; 16]> = Default::default(); + for i in 0..children.len() { + new_children[i] = children[i].map(NodeHandle::Hash); + } + TrieNode::Branch( + new_children, + value.map(|(value_length, value_hash)| { + ValueHandle::HashAndSize(value_length, value_hash) + }), + ) + } + RawTrieNode::Extension(key, child) => TrieNode::Extension(key, NodeHandle::Hash(child)), + } + } + + fn print( + &self, + f: &mut dyn fmt::Write, + memory: &NodesStorage, + spaces: &mut String, + ) -> fmt::Result { + match self { + TrieNode::Empty => { + write!(f, "{}Empty", spaces)?; + } + TrieNode::Leaf(key, _value) => { + let slice = NibbleSlice::from_encoded(key); + write!(f, "{}Leaf({:?}, val)", spaces, slice.0)?; + } + TrieNode::Branch(children, value) => { + writeln!( + f, + "{}Branch({}){{", + spaces, + if value.is_some() { "Some" } else { "None" } + )?; + spaces.push(' '); + for (idx, child) in + children.iter().enumerate().filter(|(_idx, child)| child.is_some()) + { + let child = child.as_ref().unwrap(); + write!(f, "{}{:01x}->", spaces, idx)?; + match child { + NodeHandle::Hash(hash) => { + write!(f, "{}", hash)?; + } + NodeHandle::InMemory(handle) => { + let child = &memory.node_ref(*handle).node; + child.print(f, memory, spaces)?; + } + } + writeln!(f)?; + } + spaces.remove(spaces.len() - 1); + write!(f, "{}}}", spaces)?; + } + TrieNode::Extension(key, child) => { + let slice = NibbleSlice::from_encoded(key); + writeln!(f, "{}Extension({:?})", spaces, slice)?; + spaces.push(' '); + match 
child { + NodeHandle::Hash(hash) => { + write!(f, "{}{}", spaces, hash)?; + } + NodeHandle::InMemory(handle) => { + let child = &memory.node_ref(*handle).node; + child.print(f, memory, spaces)?; + } + } + writeln!(f)?; + spaces.remove(spaces.len() - 1); + } + } + Ok(()) + } + + #[allow(dead_code)] + fn deep_to_string(&self, memory: &NodesStorage) -> String { + let mut buf = String::new(); + self.print(&mut buf, memory, &mut "".to_string()).expect("printing failed"); + buf + } + + fn memory_usage_for_value_length(value_length: u64) -> u64 { + value_length * TRIE_COSTS.byte_of_value + TRIE_COSTS.node_cost + } + + fn memory_usage_value(value: &ValueHandle, memory: Option<&NodesStorage>) -> u64 { + let value_length = match value { + ValueHandle::InMemory(handle) => memory + .expect("InMemory nodes exist, but storage is not provided") + .value_ref(*handle) + .len() as u64, + ValueHandle::HashAndSize(value_length, _value_hash) => *value_length as u64, + }; + Self::memory_usage_for_value_length(value_length) + } + + fn memory_usage_direct_no_memory(&self) -> u64 { + self.memory_usage_direct_internal(None) + } + + fn memory_usage_direct(&self, memory: &NodesStorage) -> u64 { + self.memory_usage_direct_internal(Some(memory)) + } + + fn memory_usage_direct_internal(&self, memory: Option<&NodesStorage>) -> u64 { + match self { + TrieNode::Empty => { + // DEVNOTE: empty nodes don't exist in storage. + // In the in-memory implementation Some(TrieNode::Empty) and None are interchangeable as + // children of branch nodes which means cost has to be 0 + 0 + } + TrieNode::Leaf(key, value) => { + TRIE_COSTS.node_cost + + (key.len() as u64) * TRIE_COSTS.byte_of_key + + Self::memory_usage_value(value, memory) + } + TrieNode::Branch(_children, value) => { + TRIE_COSTS.node_cost + + value.as_ref().map_or(0, |value| Self::memory_usage_value(value, memory)) + } + TrieNode::Extension(key, _child) => { + TRIE_COSTS.node_cost + (key.len() as u64) * TRIE_COSTS.byte_of_key + } + } + } +} + +#[derive(Debug, Eq, PartialEq)] +#[allow(clippy::large_enum_variant)] +enum RawTrieNode { + Leaf(Vec, u32, CryptoHash), + Branch([Option; 16], Option<(u32, CryptoHash)>), + Extension(Vec, CryptoHash), +} + +/// Trie node + memory cost of its subtree +/// memory_usage is serialized, stored, and contributes to hash +#[derive(Debug, Eq, PartialEq)] +struct RawTrieNodeWithSize { + node: RawTrieNode, + memory_usage: u64, +} + +const LEAF_NODE: u8 = 0; +const BRANCH_NODE_NO_VALUE: u8 = 1; +const BRANCH_NODE_WITH_VALUE: u8 = 2; +const EXTENSION_NODE: u8 = 3; + +fn decode_children(cursor: &mut Cursor<&[u8]>) -> Result<[Option; 16], std::io::Error> { + let mut children: [Option; 16] = Default::default(); + let bitmap = cursor.read_u16::()?; + let mut pos = 1; + for child in &mut children { + if bitmap & pos != 0 { + let mut arr = [0; 32]; + cursor.read_exact(&mut arr)?; + *child = Some(CryptoHash::try_from(&arr[..]).unwrap()); + } + pos <<= 1; + } + Ok(children) +} + +impl RawTrieNode { + fn encode_into(&self, out: &mut Vec) -> Result<(), std::io::Error> { + let mut cursor = Cursor::new(out); + // size in state_parts = size + 8 for RawTrieNodeWithSize + 8 for borsh vector length + match &self { + // size <= 1 + 4 + 4 + 32 + key_length + value_length + RawTrieNode::Leaf(key, value_length, value_hash) => { + cursor.write_u8(LEAF_NODE)?; + cursor.write_u32::(key.len() as u32)?; + cursor.write_all(key)?; + cursor.write_u32::(*value_length)?; + cursor.write_all(value_hash.as_ref())?; + } + // size <= 1 + 4 + 32 + value_length + 2 + 32 * 
num_children + RawTrieNode::Branch(children, value) => { + if let Some((value_length, value_hash)) = value { + cursor.write_u8(BRANCH_NODE_WITH_VALUE)?; + cursor.write_u32::(*value_length)?; + cursor.write_all(value_hash.as_ref())?; + } else { + cursor.write_u8(BRANCH_NODE_NO_VALUE)?; + } + let mut bitmap: u16 = 0; + let mut pos: u16 = 1; + for child in children.iter() { + if child.is_some() { + bitmap |= pos + } + pos <<= 1; + } + cursor.write_u16::(bitmap)?; + for child in children.iter() { + if let Some(hash) = child { + cursor.write_all(hash.as_ref())?; + } + } + } + // size <= 1 + 4 + key_length + 32 + RawTrieNode::Extension(key, child) => { + cursor.write_u8(EXTENSION_NODE)?; + cursor.write_u32::(key.len() as u32)?; + cursor.write_all(key)?; + cursor.write_all(child.as_ref())?; + } + } + Ok(()) + } + + #[allow(dead_code)] + fn encode(&self) -> Result, std::io::Error> { + let mut out = Vec::new(); + self.encode_into(&mut out)?; + Ok(out) + } + + fn decode(bytes: &[u8]) -> Result { + let mut cursor = Cursor::new(bytes); + match cursor.read_u8()? { + LEAF_NODE => { + let key_length = cursor.read_u32::()?; + let mut key = vec![0; key_length as usize]; + cursor.read_exact(&mut key)?; + let value_length = cursor.read_u32::()?; + let mut arr = [0; 32]; + cursor.read_exact(&mut arr)?; + let value_hash = CryptoHash(arr); + Ok(RawTrieNode::Leaf(key, value_length, value_hash)) + } + BRANCH_NODE_NO_VALUE => { + let children = decode_children(&mut cursor)?; + Ok(RawTrieNode::Branch(children, None)) + } + BRANCH_NODE_WITH_VALUE => { + let value_length = cursor.read_u32::()?; + let mut arr = [0; 32]; + cursor.read_exact(&mut arr)?; + let value_hash = CryptoHash(arr); + let children = decode_children(&mut cursor)?; + Ok(RawTrieNode::Branch(children, Some((value_length, value_hash)))) + } + EXTENSION_NODE => { + let key_length = cursor.read_u32::()?; + let mut key = vec![0; key_length as usize]; + cursor.read_exact(&mut key)?; + let mut child = [0; 32]; + cursor.read_exact(&mut child)?; + Ok(RawTrieNode::Extension(key, CryptoHash(child))) + } + _ => Err(std::io::Error::new(std::io::ErrorKind::Other, "Wrong type")), + } + } +} + +impl RawTrieNodeWithSize { + fn encode_into(&self, out: &mut Vec) -> Result<(), std::io::Error> { + self.node.encode_into(out)?; + out.write_u64::(self.memory_usage) + } + + #[allow(dead_code)] + fn encode(&self) -> Result, std::io::Error> { + let mut out = Vec::new(); + self.encode_into(&mut out)?; + Ok(out) + } + + fn decode(bytes: &[u8]) -> Result { + if bytes.len() < 8 { + return Err(std::io::Error::new(std::io::ErrorKind::Other, "Wrong type")); + } + let node = RawTrieNode::decode(&bytes[0..bytes.len() - 8])?; + let mut arr: [u8; 8] = Default::default(); + arr.copy_from_slice(&bytes[bytes.len() - 8..]); + let memory_usage = u64::from_le_bytes(arr); + Ok(RawTrieNodeWithSize { node, memory_usage }) + } +} + +pub struct Trie { + pub(crate) storage: Box, + pub counter: TouchedNodesCounter, +} + +/// Stores reference count change for some key-value pair in DB. +#[derive(BorshSerialize, BorshDeserialize, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct TrieRefcountChange { + /// Hash of trie_node_or_value and part of the DB key. + /// Used for uniting with shard id to get actual DB key. + trie_node_or_value_hash: CryptoHash, + /// DB value. Can be either serialized RawTrieNodeWithSize or value corresponding to + /// some TrieKey. 
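The branch layout encoded above stores a 16-bit presence bitmap and then only the hashes that actually exist, so a sparse branch costs two bytes of bitmap rather than sixteen hash slots. A self-contained round of just the bitmap arithmetic (with `[u8; 32]` standing in for `CryptoHash`):

fn encode_bitmap(children: &[Option<[u8; 32]>; 16]) -> u16 {
    let mut bitmap = 0u16;
    for (i, child) in children.iter().enumerate() {
        if child.is_some() {
            bitmap |= 1 << i; // bit i set <=> child i present
        }
    }
    bitmap
}

fn main() {
    let mut children: [Option<[u8; 32]>; 16] = Default::default();
    children[3] = Some([0u8; 32]);
    children[7] = Some([1u8; 32]);
    // Bits 3 and 7 set: only those two hashes follow the bitmap on disk.
    assert_eq!(encode_bitmap(&children), 0b1000_1000);
}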
+ trie_node_or_value: Vec, + /// Reference count difference which will be added to the total refcount if it corresponds to + /// insertion and subtracted from it in the case of deletion. + rc: u32, +} + +/// +/// TrieChanges stores delta for refcount. +/// Multiple versions of the state work the following way: +/// __changes1___state1 +/// state0 / +/// \__changes2___state2 +/// +/// To store state0, state1 and state2, apply insertions from changes1 and changes2 +/// +/// Then, to discard state2, apply insertions from changes2 as deletions +/// +/// Then, to discard state0, apply deletions from changes1. +/// deleting state0 while both state1 and state2 exist is not possible. +/// Applying deletions from changes1 while state2 exists makes accessing state2 invalid. +/// +/// +/// create a fork -> apply insertions +/// resolve a fork -> apply opposite of insertions +/// discard old parent which has no forks from it -> apply deletions +/// +/// Having old_root and values in deletions allows to apply TrieChanges in reverse +/// +/// StoreUpdate are the changes from current state refcount to refcount + delta. +#[derive(BorshSerialize, BorshDeserialize, Clone, PartialEq, Eq, Debug)] +pub struct TrieChanges { + pub old_root: StateRoot, + pub new_root: StateRoot, + insertions: Vec, + deletions: Vec, +} + +impl TrieChanges { + pub fn empty(old_root: StateRoot) -> Self { + TrieChanges { old_root, new_root: old_root, insertions: vec![], deletions: vec![] } + } +} + +/// Result of applying state part to Trie. +pub struct ApplyStatePartResult { + /// Trie changes after applying state part. + pub trie_changes: TrieChanges, + /// Contract codes belonging to the state part. + pub contract_codes: Vec, +} + +impl Trie { + pub fn new(store: Box, _shard_uid: ShardUId) -> Self { + Trie { storage: store, counter: TouchedNodesCounter::default() } + } + + pub fn recording_reads(&self) -> Self { + let storage = + self.storage.as_caching_storage().expect("Storage should be TrieCachingStorage"); + let storage = TrieRecordingStorage { + store: Arc::clone(&storage.store), + shard_uid: storage.shard_uid, + recorded: RefCell::new(Default::default()), + }; + Trie { storage: Box::new(storage), counter: TouchedNodesCounter::default() } + } + + pub fn empty_root() -> StateRoot { + StateRoot::default() + } + + pub fn recorded_storage(&self) -> Option { + let storage = self.storage.as_recording_storage()?; + let mut nodes: Vec<_> = + storage.recorded.borrow_mut().drain().map(|(_key, value)| value).collect(); + nodes.sort(); + Some(PartialStorage { nodes: PartialState(nodes) }) + } + + pub fn from_recorded_storage(partial_storage: PartialStorage) -> Self { + let recorded_storage = + partial_storage.nodes.0.into_iter().map(|value| (hash(&value), value)).collect(); + Trie { + storage: Box::new(TrieMemoryPartialStorage { + recorded_storage, + visited_nodes: Default::default(), + }), + counter: TouchedNodesCounter::default(), + } + } + + #[cfg(test)] + fn memory_usage_verify(&self, memory: &NodesStorage, handle: NodeHandle) -> u64 { + if self.storage.as_recording_storage().is_some() { + return 0; + } + let TrieNodeWithSize { node, memory_usage } = match handle { + NodeHandle::InMemory(h) => memory.node_ref(h).clone(), + NodeHandle::Hash(h) => self.retrieve_node(&h).expect("storage failure"), + }; + + let mut memory_usage_naive = node.memory_usage_direct(memory); + match &node { + TrieNode::Empty => {} + TrieNode::Leaf(_key, _value) => {} + TrieNode::Branch(children, _value) => { + memory_usage_naive += children + .iter() + 
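The fork discipline described in the `TrieChanges` comment above is plain refcount arithmetic: creating a fork applies its insertions as positive deltas, discarding a fork applies those same insertions with the opposite sign, and only a state with no remaining forks has its deletions applied. A minimal model with `String` standing in for node hashes:

use std::collections::HashMap;

// Apply refcount deltas; entries that fall to zero are removed (the DB analog of deletion).
fn apply(store: &mut HashMap<String, i64>, deltas: &[(String, i64)]) {
    for (key, d) in deltas {
        let rc = store.entry(key.clone()).or_insert(0);
        *rc += d;
        if *rc <= 0 {
            store.remove(key);
        }
    }
}

fn main() {
    let mut store = HashMap::new();
    let changes1 = vec![("node_a".to_string(), 1)]; // state0 -> state1
    let changes2 = vec![("node_a".to_string(), 1), ("node_b".to_string(), 1)]; // state0 -> state2

    apply(&mut store, &changes1);
    apply(&mut store, &changes2);
    assert_eq!(store["node_a"], 2); // shared by both forks

    // Discard state2: apply its insertions with the opposite sign.
    let revert2: Vec<_> = changes2.iter().map(|(k, d)| (k.clone(), -d)).collect();
    apply(&mut store, &revert2);
    assert_eq!(store["node_a"], 1);
    assert!(!store.contains_key("node_b"));
}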
.filter_map(Option::as_ref) + .map(|handle| self.memory_usage_verify(memory, handle.clone())) + .sum::(); + } + TrieNode::Extension(_key, child) => { + memory_usage_naive += self.memory_usage_verify(memory, child.clone()); + } + }; + if memory_usage_naive != memory_usage { + eprintln!("Incorrectly calculated memory usage"); + eprintln!("Correct is {}", memory_usage_naive); + eprintln!("Computed is {}", memory_usage); + match handle { + NodeHandle::InMemory(h) => { + eprintln!("TRIE!!!!"); + eprintln!("{}", memory.node_ref(h).node.deep_to_string(memory)); + } + NodeHandle::Hash(_h) => { + eprintln!("Bad node in storage!"); + } + }; + assert_eq!(memory_usage_naive, memory_usage); + } + memory_usage + } + + fn delete_value( + &self, + memory: &mut NodesStorage, + value: &ValueHandle, + ) -> Result<(), StorageError> { + match value { + ValueHandle::HashAndSize(_, hash) => { + let bytes = self.storage.retrieve_raw_bytes(hash)?; + memory.refcount_changes.entry(*hash).or_insert_with(|| (bytes.to_vec(), 0)).1 -= 1; + } + ValueHandle::InMemory(_) => { + // do nothing + } + } + Ok(()) + } + + fn move_node_to_mutable( + &self, + memory: &mut NodesStorage, + hash: &CryptoHash, + ) -> Result { + if *hash == Trie::empty_root() { + Ok(memory.store(TrieNodeWithSize::empty())) + } else { + self.counter.increment(); + let bytes = self.storage.retrieve_raw_bytes(hash)?; + match RawTrieNodeWithSize::decode(&bytes) { + Ok(value) => { + let result = memory.store(TrieNodeWithSize::from_raw(value)); + memory + .refcount_changes + .entry(*hash) + .or_insert_with(|| (bytes.to_vec(), 0)) + .1 -= 1; + Ok(result) + } + Err(_) => Err(StorageError::StorageInconsistentState(format!( + "Failed to decode node {}", + hash + ))), + } + } + } + + fn retrieve_node(&self, hash: &CryptoHash) -> Result { + if *hash == Trie::empty_root() { + return Ok(TrieNodeWithSize::empty()); + } + let bytes = self.retrieve_raw_bytes(hash)?; + match RawTrieNodeWithSize::decode(&bytes) { + Ok(value) => Ok(TrieNodeWithSize::from_raw(value)), + Err(_) => Err(StorageError::StorageInconsistentState(format!( + "Failed to decode node {}", + hash + ))), + } + } + + pub(crate) fn retrieve_raw_bytes(&self, hash: &CryptoHash) -> Result, StorageError> { + self.counter.increment(); + self.storage.retrieve_raw_bytes(hash) + } + + pub fn retrieve_root_node(&self, root: &StateRoot) -> Result { + if *root == Trie::empty_root() { + return Ok(StateRootNode::empty()); + } + let data = self.retrieve_raw_bytes(root)?; + match RawTrieNodeWithSize::decode(&data) { + Ok(value) => { + let memory_usage = TrieNodeWithSize::from_raw(value).memory_usage; + Ok(StateRootNode { data, memory_usage }) + } + Err(_) => Err(StorageError::StorageInconsistentState(format!( + "Failed to decode node {}", + root + ))), + } + } + + fn lookup( + &self, + root: &CryptoHash, + mut key: NibbleSlice<'_>, + ) -> Result, StorageError> { + let mut hash = *root; + + loop { + if hash == Trie::empty_root() { + return Ok(None); + } + let bytes = self.retrieve_raw_bytes(&hash)?; + let node = RawTrieNodeWithSize::decode(&bytes).map_err(|_| { + StorageError::StorageInconsistentState("RawTrieNode decode failed".to_string()) + })?; + + match node.node { + RawTrieNode::Leaf(existing_key, value_length, value_hash) => { + if NibbleSlice::from_encoded(&existing_key).0 == key { + return Ok(Some((value_length, value_hash))); + } else { + return Ok(None); + } + } + RawTrieNode::Extension(existing_key, child) => { + let existing_key = NibbleSlice::from_encoded(&existing_key).0; + if 
key.starts_with(&existing_key) { + hash = child; + key = key.mid(existing_key.len()); + } else { + return Ok(None); + } + } + RawTrieNode::Branch(mut children, value) => { + if key.is_empty() { + match value { + Some((value_length, value_hash)) => { + return Ok(Some((value_length, value_hash))); + } + None => return Ok(None), + } + } else { + match children[key.at(0) as usize].take() { + Some(x) => { + hash = x; + key = key.mid(1); + } + None => return Ok(None), + } + } + } + }; + } + } + + pub fn get_ref( + &self, + root: &CryptoHash, + key: &[u8], + ) -> Result, StorageError> { + let key = NibbleSlice::new(key); + self.lookup(root, key) + } + + pub fn get(&self, root: &CryptoHash, key: &[u8]) -> Result>, StorageError> { + match self.get_ref(root, key)? { + Some((_length, hash)) => self.retrieve_raw_bytes(&hash).map(Some), + None => Ok(None), + } + } + + pub(crate) fn convert_to_insertions_and_deletions( + changes: HashMap, i32)>, + ) -> (Vec, Vec) { + let mut deletions = Vec::new(); + let mut insertions = Vec::new(); + for (trie_node_or_value_hash, (trie_node_or_value, rc)) in changes.into_iter() { + match rc.cmp(&0) { + Ordering::Greater => insertions.push(TrieRefcountChange { + trie_node_or_value_hash, + trie_node_or_value, + rc: rc as u32, + }), + Ordering::Less => deletions.push(TrieRefcountChange { + trie_node_or_value_hash, + trie_node_or_value, + rc: (-rc) as u32, + }), + Ordering::Equal => {} + } + } + // Sort so that trie changes have unique representation + insertions.sort(); + deletions.sort(); + (insertions, deletions) + } + + pub fn update(&self, root: &CryptoHash, changes: I) -> Result + where + I: Iterator, Option>)>, + { + let mut memory = NodesStorage::new(); + let mut root_node = self.move_node_to_mutable(&mut memory, root)?; + for (key, value) in changes { + let key = NibbleSlice::new(&key); + match value { + Some(arr) => { + root_node = self.insert(&mut memory, root_node, key, arr)?; + } + None => { + root_node = self.delete(&mut memory, root_node, key)?; + } + } + } + + #[cfg(test)] + { + self.memory_usage_verify(&memory, NodeHandle::InMemory(root_node)); + } + Trie::flatten_nodes(root, memory, root_node) + } + + pub fn iter<'a>(&'a self, root: &CryptoHash) -> Result, StorageError> { + TrieIterator::new(self, root) + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use crate::db::DBCol::ColState; + use crate::test_utils::{ + create_test_store, create_tries, create_tries_complex, gen_changes, simplify_changes, + test_populate_trie, + }; + + use super::*; + + type TrieChanges = Vec<(Vec, Option>)>; + const SHARD_VERSION: u32 = 1; + + fn test_clear_trie( + tries: &ShardTries, + root: &CryptoHash, + shard_uid: ShardUId, + changes: TrieChanges, + ) -> CryptoHash { + let trie = tries.get_trie_for_shard(shard_uid); + let delete_changes: TrieChanges = + changes.iter().map(|(key, _)| (key.clone(), None)).collect(); + let mut other_delete_changes = delete_changes.clone(); + let trie_changes = trie.update(root, other_delete_changes.drain(..)).unwrap(); + let (store_update, root) = tries.apply_all(&trie_changes, shard_uid).unwrap(); + store_update.commit().unwrap(); + for (key, _) in delete_changes { + assert_eq!(trie.get(&root, &key), Ok(None)); + } + root + } + + #[test] + fn test_encode_decode() { + let value = vec![123, 245, 255]; + let value_length = 3; + let value_hash = hash(&value); + let node = RawTrieNode::Leaf(vec![1, 2, 3], value_length, value_hash); + let buf = node.encode().expect("Failed to serialize"); + let new_node = 
RawTrieNode::decode(&buf).expect("Failed to deserialize"); + assert_eq!(node, new_node); + + let mut children: [Option; 16] = Default::default(); + children[3] = Some(CryptoHash::default()); + let node = RawTrieNode::Branch(children, Some((value_length, value_hash))); + let buf = node.encode().expect("Failed to serialize"); + let new_node = RawTrieNode::decode(&buf).expect("Failed to deserialize"); + assert_eq!(node, new_node); + + let node = RawTrieNode::Extension(vec![123, 245, 255], CryptoHash::default()); + let buf = node.encode().expect("Failed to serialize"); + let new_node = RawTrieNode::decode(&buf).expect("Failed to deserialize"); + assert_eq!(node, new_node); + } + + #[test] + fn test_basic_trie() { + // test trie version > 0 + let tries = create_tries_complex(SHARD_VERSION, 2); + let shard_uid = ShardUId { version: SHARD_VERSION, shard_id: 0 }; + let trie = tries.get_trie_for_shard(shard_uid); + let empty_root = Trie::empty_root(); + assert_eq!(trie.get(&empty_root, &[122]), Ok(None)); + let changes = vec![ + (b"doge".to_vec(), Some(b"coin".to_vec())), + (b"docu".to_vec(), Some(b"value".to_vec())), + (b"do".to_vec(), Some(b"verb".to_vec())), + (b"horse".to_vec(), Some(b"stallion".to_vec())), + (b"dog".to_vec(), Some(b"puppy".to_vec())), + (b"h".to_vec(), Some(b"value".to_vec())), + ]; + let root = test_populate_trie(&tries, &empty_root, shard_uid, changes.clone()); + let new_root = test_clear_trie(&tries, &root, shard_uid, changes); + assert_eq!(new_root, empty_root); + assert_eq!(trie.iter(&new_root).unwrap().fold(0, |acc, _| acc + 1), 0); + } + + #[test] + fn test_trie_iter() { + let tries = create_tries_complex(SHARD_VERSION, 2); + let shard_uid = ShardUId { version: SHARD_VERSION, shard_id: 0 }; + let trie = tries.get_trie_for_shard(shard_uid); + let pairs = vec![ + (b"a".to_vec(), Some(b"111".to_vec())), + (b"b".to_vec(), Some(b"222".to_vec())), + (b"x".to_vec(), Some(b"333".to_vec())), + (b"y".to_vec(), Some(b"444".to_vec())), + ]; + let root = test_populate_trie(&tries, &Trie::empty_root(), shard_uid, pairs.clone()); + let mut iter_pairs = vec![]; + for pair in trie.iter(&root).unwrap() { + let (key, value) = pair.unwrap(); + iter_pairs.push((key, Some(value.to_vec()))); + } + assert_eq!(pairs, iter_pairs); + + let mut other_iter = trie.iter(&root).unwrap(); + other_iter.seek(b"r").unwrap(); + assert_eq!(other_iter.next().unwrap().unwrap().0, b"x".to_vec()); + } + + #[test] + fn test_trie_leaf_into_branch() { + let tries = create_tries_complex(SHARD_VERSION, 2); + let shard_uid = ShardUId { version: SHARD_VERSION, shard_id: 0 }; + let changes = vec![ + (b"dog".to_vec(), Some(b"puppy".to_vec())), + (b"dog2".to_vec(), Some(b"puppy".to_vec())), + (b"xxx".to_vec(), Some(b"puppy".to_vec())), + ]; + test_populate_trie(&tries, &Trie::empty_root(), shard_uid, changes); + } + + #[test] + fn test_trie_same_node() { + let tries = create_tries(); + let changes = vec![ + (b"dogaa".to_vec(), Some(b"puppy".to_vec())), + (b"dogbb".to_vec(), Some(b"puppy".to_vec())), + (b"cataa".to_vec(), Some(b"puppy".to_vec())), + (b"catbb".to_vec(), Some(b"puppy".to_vec())), + (b"dogax".to_vec(), Some(b"puppy".to_vec())), + ]; + test_populate_trie(&tries, &Trie::empty_root(), ShardUId::single_shard(), changes); + } + + #[test] + fn test_trie_iter_seek_stop_at_extension() { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let changes = vec![ + (vec![0, 116, 101, 115, 116], Some(vec![0])), + (vec![2, 116, 101, 115, 116], Some(vec![0])), + ( + vec![ + 0, 
116, 101, 115, 116, 44, 98, 97, 108, 97, 110, 99, 101, 115, 58, 98, 111, 98, + 46, 110, 101, 97, 114, + ], + Some(vec![0]), + ), + ( + vec![ + 0, 116, 101, 115, 116, 44, 98, 97, 108, 97, 110, 99, 101, 115, 58, 110, 117, + 108, 108, + ], + Some(vec![0]), + ), + ]; + let root = + test_populate_trie(&tries, &Trie::empty_root(), ShardUId::single_shard(), changes); + let mut iter = trie.iter(&root).unwrap(); + iter.seek(&vec![0, 116, 101, 115, 116, 44]).unwrap(); + let mut pairs = vec![]; + for pair in iter { + pairs.push(pair.unwrap().0); + } + assert_eq!( + pairs[..2], + [ + vec![ + 0, 116, 101, 115, 116, 44, 98, 97, 108, 97, 110, 99, 101, 115, 58, 98, 111, 98, + 46, 110, 101, 97, 114 + ], + vec![ + 0, 116, 101, 115, 116, 44, 98, 97, 108, 97, 110, 99, 101, 115, 58, 110, 117, + 108, 108 + ], + ] + ); + } + + #[test] + fn test_trie_remove_non_existent_key() { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let initial = vec![ + (vec![99, 44, 100, 58, 58, 49], Some(vec![1])), + (vec![99, 44, 100, 58, 58, 50], Some(vec![1])), + (vec![99, 44, 100, 58, 58, 50, 51], Some(vec![1])), + ]; + let root = + test_populate_trie(&tries, &Trie::empty_root(), ShardUId::single_shard(), initial); + + let changes = vec![ + (vec![99, 44, 100, 58, 58, 45, 49], None), + (vec![99, 44, 100, 58, 58, 50, 52], None), + ]; + let root = test_populate_trie(&tries, &root, ShardUId::single_shard(), changes); + for r in trie.iter(&root).unwrap() { + r.unwrap(); + } + } + + #[test] + fn test_equal_leafs() { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let initial = vec![ + (vec![1, 2, 3], Some(vec![1])), + (vec![2, 2, 3], Some(vec![1])), + (vec![3, 2, 3], Some(vec![1])), + ]; + let root = + test_populate_trie(&tries, &Trie::empty_root(), ShardUId::single_shard(), initial); + for r in trie.iter(&root).unwrap() { + r.unwrap(); + } + + let changes = vec![(vec![1, 2, 3], None)]; + let root = test_populate_trie(&tries, &root, ShardUId::single_shard(), changes); + for r in trie.iter(&root).unwrap() { + r.unwrap(); + } + } + + #[test] + fn test_trie_unique() { + let mut rng = rand::thread_rng(); + for _ in 0..100 { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let trie_changes = gen_changes(&mut rng, 20); + let simplified_changes = simplify_changes(&trie_changes); + + let trie_changes1 = + trie.update(&Trie::empty_root(), trie_changes.iter().cloned()).unwrap(); + let trie_changes2 = + trie.update(&Trie::empty_root(), simplified_changes.iter().cloned()).unwrap(); + if trie_changes1.new_root != trie_changes2.new_root { + eprintln!("{:?}", trie_changes); + eprintln!("{:?}", simplified_changes); + eprintln!("root1: {:?}", trie_changes1.new_root); + eprintln!("root2: {:?}", trie_changes2.new_root); + panic!("MISMATCH!"); + } + // TODO: compare state updates? 
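The equal-roots assertion above relies on `simplify_changes` (from `test_utils`, shown earlier in this patch) collapsing a change list to its net effect: the last write to a key wins and deleted keys disappear, so the simplified list must rebuild the identical trie. A freestanding mirror of that behavior:

use std::collections::HashMap;

fn simplify(changes: &[(Vec<u8>, Option<Vec<u8>>)]) -> Vec<(Vec<u8>, Option<Vec<u8>>)> {
    let mut state: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
    for (key, value) in changes {
        match value {
            Some(v) => {
                state.insert(key.clone(), v.clone());
            }
            None => {
                state.remove(key);
            }
        }
    }
    let mut result: Vec<_> = state.into_iter().map(|(k, v)| (k, Some(v))).collect();
    result.sort();
    result
}

fn main() {
    let changes = vec![
        (b"a".to_vec(), Some(b"1".to_vec())),
        (b"b".to_vec(), Some(b"2".to_vec())),
        (b"a".to_vec(), None),                // tombstone removes "a" entirely
        (b"b".to_vec(), Some(b"3".to_vec())), // last write wins
    ];
    assert_eq!(simplify(&changes), vec![(b"b".to_vec(), Some(b"3".to_vec()))]);
}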
+ } + } + + #[test] + fn test_iterator_seek() { + let mut rng = rand::thread_rng(); + for _test_run in 0..10 { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let trie_changes = gen_changes(&mut rng, 500); + + let state_root = test_populate_trie( + &tries, + &Trie::empty_root(), + ShardUId::single_shard(), + trie_changes.clone(), + ); + let queries = gen_changes(&mut rng, 500).into_iter().map(|(key, _)| key); + for query in queries { + let mut iterator = trie.iter(&state_root).unwrap(); + iterator.seek(&query).unwrap(); + if let Some(Ok((key, _))) = iterator.next() { + assert!(key >= query); + } + } + } + } + + #[test] + fn test_refcounts() { + let mut rng = rand::thread_rng(); + for _test_run in 0..10 { + let num_iterations = rng.gen_range(1, 20); + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let mut state_root = Trie::empty_root(); + for _ in 0..num_iterations { + let trie_changes = gen_changes(&mut rng, 20); + state_root = + test_populate_trie(&tries, &state_root, ShardUId::single_shard(), trie_changes); + println!( + "New memory_usage: {}", + trie.retrieve_root_node(&state_root).unwrap().memory_usage + ); + } + { + let trie_changes = trie + .iter(&state_root) + .unwrap() + .map(|item| { + let (key, _) = item.unwrap(); + (key, None) + }) + .collect::>(); + state_root = + test_populate_trie(&tries, &state_root, ShardUId::single_shard(), trie_changes); + assert_eq!(state_root, Trie::empty_root(), "Trie must be empty"); + assert!( + trie.storage + .as_caching_storage() + .unwrap() + .store + .iter(ColState) + .peekable() + .peek() + .is_none(), + "Storage must be empty" + ); + } + } + } + + #[test] + fn test_trie_restart() { + let store = create_test_store(); + let tries = ShardTries::new(store.clone(), 0, 1); + let empty_root = Trie::empty_root(); + let changes = vec![ + (b"doge".to_vec(), Some(b"coin".to_vec())), + (b"docu".to_vec(), Some(b"value".to_vec())), + (b"do".to_vec(), Some(b"verb".to_vec())), + (b"horse".to_vec(), Some(b"stallion".to_vec())), + (b"dog".to_vec(), Some(b"puppy".to_vec())), + (b"h".to_vec(), Some(b"value".to_vec())), + ]; + let root = test_populate_trie(&tries, &empty_root, ShardUId::single_shard(), changes); + + let tries2 = ShardTries::new(store, 0, 1); + let trie2 = tries2.get_trie_for_shard(ShardUId::single_shard()); + assert_eq!(trie2.get(&root, b"doge"), Ok(Some(b"coin".to_vec()))); + } + + // TODO: somehow also test that we don't record unnecessary nodes + #[test] + fn test_trie_recording_reads() { + let store = create_test_store(); + let tries = ShardTries::new(store, 0, 1); + let empty_root = Trie::empty_root(); + let changes = vec![ + (b"doge".to_vec(), Some(b"coin".to_vec())), + (b"docu".to_vec(), Some(b"value".to_vec())), + (b"do".to_vec(), Some(b"verb".to_vec())), + (b"horse".to_vec(), Some(b"stallion".to_vec())), + (b"dog".to_vec(), Some(b"puppy".to_vec())), + (b"h".to_vec(), Some(b"value".to_vec())), + ]; + let root = test_populate_trie(&tries, &empty_root, ShardUId::single_shard(), changes); + + let trie2 = tries.get_trie_for_shard(ShardUId::single_shard()).recording_reads(); + trie2.get(&root, b"dog").unwrap(); + trie2.get(&root, b"horse").unwrap(); + let partial_storage = trie2.recorded_storage(); + + let trie3 = Trie::from_recorded_storage(partial_storage.unwrap()); + + assert_eq!(trie3.get(&root, b"dog"), Ok(Some(b"puppy".to_vec()))); + assert_eq!(trie3.get(&root, b"horse"), Ok(Some(b"stallion".to_vec()))); + assert_eq!(trie3.get(&root, b"doge"), 
+    }
+
+    #[test]
+    fn test_trie_recording_reads_update() {
+        let store = create_test_store();
+        let tries = ShardTries::new(store, 0, 1);
+        let empty_root = Trie::empty_root();
+        let changes = vec![
+            (b"doge".to_vec(), Some(b"coin".to_vec())),
+            (b"docu".to_vec(), Some(b"value".to_vec())),
+        ];
+        let root = test_populate_trie(&tries, &empty_root, ShardUId::single_shard(), changes);
+        // Trie: extension -> branch -> 2 leaves
+        {
+            let trie2 = tries.get_trie_for_shard(ShardUId::single_shard()).recording_reads();
+            trie2.get(&root, b"doge").unwrap();
+            // record extension, branch and one leaf with value, but not the other
+            assert_eq!(trie2.recorded_storage().unwrap().nodes.0.len(), 4);
+        }
+
+        {
+            let trie2 = tries.get_trie_for_shard(ShardUId::single_shard()).recording_reads();
+            let updates = vec![(b"doge".to_vec(), None)];
+            trie2.update(&root, updates.into_iter()).unwrap();
+            // record extension, branch and both leaves (one with value)
+            assert_eq!(trie2.recorded_storage().unwrap().nodes.0.len(), 5);
+        }
+
+        {
+            let trie2 = tries.get_trie_for_shard(ShardUId::single_shard()).recording_reads();
+            let updates = vec![(b"dodo".to_vec(), Some(b"asdf".to_vec()))];
+            trie2.update(&root, updates.into_iter()).unwrap();
+            // record extension and branch, but not leaves
+            assert_eq!(trie2.recorded_storage().unwrap().nodes.0.len(), 2);
+        }
+    }
+
+    #[test]
+    fn test_dump_load_trie() {
+        let store = create_test_store();
+        let tries = ShardTries::new(store.clone(), 0, 1);
+        let empty_root = Trie::empty_root();
+        let changes = vec![
+            (b"doge".to_vec(), Some(b"coin".to_vec())),
+            (b"docu".to_vec(), Some(b"value".to_vec())),
+        ];
+        let root = test_populate_trie(&tries, &empty_root, ShardUId::single_shard(), changes);
+        let dir = tempfile::Builder::new().prefix("test_dump_load_trie").tempdir().unwrap();
+        store.save_to_file(ColState, &dir.path().join("test.bin")).unwrap();
+        let store2 = create_test_store();
+        store2.load_from_file(ColState, &dir.path().join("test.bin")).unwrap();
+        let tries2 = ShardTries::new(store2, 0, 1);
+        let trie2 = tries2.get_trie_for_shard(ShardUId::single_shard());
+        assert_eq!(trie2.get(&root, b"doge").unwrap().unwrap(), b"coin");
+    }
+}
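Taken together, the two recording-reads tests above capture the state-proof flow this store is built around: record the nodes touched by a read, then replay the same read against the recorded partial storage without any access to the full store. A condensed sketch, using only the APIs exercised in these tests (with `tries` and `root` as populated there; an illustration, not part of the patch), might look like:

    // Record the nodes touched by a read.
    let recording = tries.get_trie_for_shard(ShardUId::single_shard()).recording_reads();
    recording.get(&root, b"dog").unwrap();              // touches a few nodes
    let proof = recording.recorded_storage().unwrap();  // PartialStorage proof
    // Replay against the proof alone -- a storage-free Trie.
    let verifier = Trie::from_recorded_storage(proof);
    assert_eq!(verifier.get(&root, b"dog"), Ok(Some(b"puppy".to_vec())));
    // Keys outside the recorded node set fail with StorageError::TrieNodeMissing.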
diff --git a/mock-enclave/src/skw-vm-store/src/trie/nibble_slice.rs b/mock-enclave/src/skw-vm-store/src/trie/nibble_slice.rs
new file mode 100644
index 0000000..3bd2436
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/trie/nibble_slice.rs
@@ -0,0 +1,355 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Nibble-orientated view onto byte-slice, allowing nibble-precision offsets.
+
+use elastic_array::ElasticArray36;
+use std::cmp::*;
+use std::fmt;
+
+/// Nibble-orientated view onto byte-slice, allowing nibble-precision offsets.
+///
+/// This is an immutable struct. No operations actually change it.
+///
+/// # Example
+/// ```snippet
+/// use patricia_trie::nibbleslice::NibbleSlice;
+/// {
+///     let d1 = &[0x01u8, 0x23, 0x45];
+///     let d2 = &[0x34u8, 0x50, 0x12];
+///     let d3 = &[0x00u8, 0x12];
+///     let n1 = NibbleSlice::new(d1);           // 0,1,2,3,4,5
+///     let n2 = NibbleSlice::new(d2);           // 3,4,5,0,1,2
+///     let n3 = NibbleSlice::new_offset(d3, 1); // 0,1,2
+///     assert!(n1 > n3);                        // 0,1,2,... > 0,1,2
+///     assert!(n1 < n2);                        // 0,... < 3,...
+///     assert!(n2.mid(3) == n3);                // 0,1,2 == 0,1,2
+///     assert!(n1.starts_with(&n3));
+///     assert_eq!(n1.common_prefix(&n3), 3);
+///     assert_eq!(n2.mid(3).common_prefix(&n1), 3);
+/// }
+/// ```
+#[derive(Copy, Clone, Eq, Ord)]
+pub struct NibbleSlice<'a> {
+    data: &'a [u8],
+    offset: usize,
+}
+
+/// Iterator type for a nibble slice.
+pub struct NibbleSliceIterator<'a> {
+    p: &'a NibbleSlice<'a>,
+    i: usize,
+}
+
+impl Iterator for NibbleSliceIterator<'_> {
+    type Item = u8;
+
+    fn next(&mut self) -> Option<u8> {
+        self.i += 1;
+        if self.i <= self.p.len() {
+            Some(self.p.at(self.i - 1))
+        } else {
+            None
+        }
+    }
+}
+
+impl<'a> NibbleSlice<'a> {
+    /// Create a new nibble slice with the given byte-slice.
+    pub fn new(data: &'a [u8]) -> Self {
+        NibbleSlice::new_offset(data, 0)
+    }
+
+    /// Create a new nibble slice with the given byte-slice with a nibble offset.
+    pub fn new_offset(data: &'a [u8], offset: usize) -> Self {
+        NibbleSlice { data, offset }
+    }
+
+    /// Get an iterator for the series of nibbles.
+    pub fn iter(&'a self) -> NibbleSliceIterator<'a> {
+        NibbleSliceIterator { p: self, i: 0 }
+    }
+
+    /// Create a new nibble slice from the given HPE encoded data (e.g. output of `encoded()`).
+    pub fn from_encoded(data: &'a [u8]) -> (Self, bool) {
+        (Self::new_offset(data, if data[0] & 16 == 16 { 1 } else { 2 }), data[0] & 32 == 32)
+    }
+
+    /// Is this an empty slice?
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Get the length (in nibbles, naturally) of this slice.
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.data.len() * 2 - self.offset
+    }
+
+    /// Get the nibble at position `i`.
+    #[inline(always)]
+    pub fn at(&self, i: usize) -> u8 {
+        if (self.offset + i) & 1 == 1 {
+            self.data[(self.offset + i) / 2] & 15u8
+        } else {
+            self.data[(self.offset + i) / 2] >> 4
+        }
+    }
+
+    /// Return object which represents a view on to this slice (further) offset by `i` nibbles.
+    pub fn mid(&self, i: usize) -> Self {
+        NibbleSlice { data: self.data, offset: self.offset + i }
+    }
+
+    /// Do we start with the same nibbles as the whole of `them`?
+    pub fn starts_with(&self, them: &Self) -> bool {
+        self.common_prefix(them) == them.len()
+    }
+
+    /// How many of the same nibbles at the beginning do we match with `them`?
+    pub fn common_prefix(&self, them: &Self) -> usize {
+        let s = min(self.len(), them.len());
+        for i in 0..s {
+            if self.at(i) != them.at(i) {
+                return i;
+            }
+        }
+        s
+    }
+
+    /// Encode the given nibbles in prefixed hex notation, noting whether it `is_leaf`.
+    #[inline]
+    pub fn encode_nibbles(nibbles: &[u8], is_leaf: bool) -> ElasticArray36<u8> {
+        let l = nibbles.len();
+        let mut r = ElasticArray36::new();
+        let mut i = l % 2;
+        r.push(if i == 1 { 0x10 + nibbles[0] } else { 0 } + if is_leaf { 0x20 } else { 0 });
+        while i < l {
+            r.push(nibbles[i] * 16 + nibbles[i + 1]);
+            i += 2;
+        }
+        r
+    }
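For orientation: the first byte written by `encode_nibbles` (and by `encoded` below) packs two flags, 0x20 for leaf nodes and 0x10 for an odd nibble count, with the first nibble stored in the low half of the flag byte when the count is odd. The unit tests later in this file pin this down; restated as a sketch (illustrative, not part of the patch):

    let n = NibbleSlice::new(&[0x01, 0x23, 0x45]); // nibbles 0,1,2,3,4,5
    // Even nibble count: flag byte is 0x00 (extension) or 0x20 (leaf).
    assert_eq!(n.encoded(false), ElasticArray36::from_slice(&[0x00, 0x01, 0x23, 0x45]));
    assert_eq!(n.encoded(true), ElasticArray36::from_slice(&[0x20, 0x01, 0x23, 0x45]));
    // Odd nibble count: flag byte also carries 0x10 plus the first nibble.
    assert_eq!(n.mid(1).encoded(false), ElasticArray36::from_slice(&[0x11, 0x23, 0x45]));
    assert_eq!(n.mid(1).encoded(true), ElasticArray36::from_slice(&[0x31, 0x23, 0x45]));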
+    /// Encode the whole nibble slice in prefixed hex notation, noting whether it `is_leaf`.
+    #[inline]
+    pub fn encoded(&self, is_leaf: bool) -> ElasticArray36<u8> {
+        let l = self.len();
+        let mut r = ElasticArray36::new();
+        let mut i = l % 2;
+        r.push(if i == 1 { 0x10 + self.at(0) } else { 0 } + if is_leaf { 0x20 } else { 0 });
+        while i < l {
+            r.push(self.at(i) * 16 + self.at(i + 1));
+            i += 2;
+        }
+        r
+    }
+
+    pub fn merge_encoded(&self, other: &Self, is_leaf: bool) -> ElasticArray36<u8> {
+        let l = self.len() + other.len();
+        let mut r = ElasticArray36::new();
+        let mut i = l % 2;
+        r.push(if i == 1 { 0x10 + self.at(0) } else { 0 } + if is_leaf { 0x20 } else { 0 });
+        while i < l {
+            let bit1 = if i < self.len() { self.at(i) } else { other.at(i - self.len()) };
+            let bit2 = if i + 1 < l {
+                if i + 1 < self.len() {
+                    self.at(i + 1)
+                } else {
+                    other.at(i + 1 - self.len())
+                }
+            } else {
+                0
+            };
+
+            r.push(bit1 * 16 + bit2);
+            i += 2;
+        }
+        r
+    }
+
+    /// Encode only the leftmost `n` bytes of the nibble slice in prefixed hex notation,
+    /// noting whether it `is_leaf`.
+    pub fn encoded_leftmost(&self, n: usize, is_leaf: bool) -> ElasticArray36<u8> {
+        let l = min(self.len(), n);
+        let mut r = ElasticArray36::new();
+        let mut i = l % 2;
+        r.push(if i == 1 { 0x10 + self.at(0) } else { 0 } + if is_leaf { 0x20 } else { 0 });
+        while i < l {
+            r.push(self.at(i) * 16 + self.at(i + 1));
+            i += 2;
+        }
+        r
+    }
+}
+
+impl PartialEq for NibbleSlice<'_> {
+    fn eq(&self, them: &Self) -> bool {
+        self.len() == them.len() && self.starts_with(them)
+    }
+}
+
+impl PartialOrd for NibbleSlice<'_> {
+    fn partial_cmp(&self, them: &Self) -> Option<Ordering> {
+        let s = min(self.len(), them.len());
+        for i in 0..s {
+            match self.at(i).partial_cmp(&them.at(i)).unwrap() {
+                Ordering::Less => return Some(Ordering::Less),
+                Ordering::Greater => return Some(Ordering::Greater),
+                _ => {}
+            }
+        }
+        self.len().partial_cmp(&them.len())
+    }
+}
+
+impl fmt::Debug for NibbleSlice<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.is_empty() {
+            return Ok(());
+        }
+        write!(f, "{:01x}", self.at(0))?;
+        for i in 1..self.len() {
+            write!(f, "'{:01x}", self.at(i))?;
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::NibbleSlice;
+    use elastic_array::ElasticArray36;
+    use rand::{thread_rng, Rng};
+
+    static D: &[u8; 3] = &[0x01u8, 0x23, 0x45];
+
+    #[test]
+    fn basics() {
+        let n = NibbleSlice::new(D);
+        assert_eq!(n.len(), 6);
+        assert!(!n.is_empty());
+
+        let n = NibbleSlice::new_offset(D, 6);
+        assert!(n.is_empty());
+
+        let n = NibbleSlice::new_offset(D, 3);
+        assert_eq!(n.len(), 3);
+        for i in 0..3 {
+            assert_eq!(n.at(i), i as u8 + 3);
+        }
+    }
+
+    #[test]
+    fn iterator() {
+        let n = NibbleSlice::new(D);
+        let mut nibbles: Vec<u8> = vec![];
+        nibbles.extend(n.iter());
+        assert_eq!(nibbles, (0u8..6).collect::<Vec<_>>())
+    }
+
+    #[test]
+    fn mid() {
+        let n = NibbleSlice::new(D);
+        let m = n.mid(2);
+        for i in 0..4 {
+            assert_eq!(m.at(i), i as u8 + 2);
+        }
+        let m = n.mid(3);
+        for i in 0..3 {
+            assert_eq!(m.at(i), i as u8 + 3);
+        }
+    }
+
+    #[test]
+    fn encoded() {
+        let n = NibbleSlice::new(D);
+        assert_eq!(n.encoded(false), ElasticArray36::from_slice(&[0x00, 0x01, 0x23, 0x45]));
+        assert_eq!(n.encoded(true), ElasticArray36::from_slice(&[0x20, 0x01, 0x23, 0x45]));
+        assert_eq!(n.mid(1).encoded(false), ElasticArray36::from_slice(&[0x11, 0x23, 0x45]));
+        assert_eq!(n.mid(1).encoded(true), ElasticArray36::from_slice(&[0x31, 0x23, 0x45]));
+    }
+
+    #[test]
+    fn from_encoded() {
+        let n = NibbleSlice::new(D);
+        assert_eq!((n, false), NibbleSlice::from_encoded(&[0x00, 0x01, 0x23, 0x45]));
+        assert_eq!((n, true), NibbleSlice::from_encoded(&[0x20, 0x01, 0x23, 0x45]));
+        assert_eq!((n.mid(1), false), NibbleSlice::from_encoded(&[0x11, 0x23, 0x45]));
+        assert_eq!((n.mid(1), true), NibbleSlice::from_encoded(&[0x31, 0x23, 0x45]));
+    }
+
+    fn encode_decode(nibbles: &[u8], is_leaf: bool) {
+        let n = NibbleSlice::encode_nibbles(nibbles, is_leaf);
+        let (n, is_leaf_decoded) = NibbleSlice::from_encoded(&n);
+        assert_eq!(&n.iter().collect::<Vec<_>>(), nibbles);
+        assert_eq!(is_leaf_decoded, is_leaf)
+    }
+
+    #[test]
+    fn test_encode_decode() {
+        encode_decode(&[15u8], false);
+        encode_decode(&[0u8], false);
+        encode_decode(&[15u8], true);
+        encode_decode(&[0u8], true);
+        let mut rng = thread_rng();
+        for _ in 0..100 {
+            let l = rng.gen_range(0, 10);
+            let nibbles: Vec<_> = (0..l).map(|_| rng.gen_range(0, 16) as u8).collect();
+            encode_decode(&nibbles, true);
+            encode_decode(&nibbles, false);
+        }
+    }
+
+    #[test]
+    fn shared() {
+        let n = NibbleSlice::new(D);
+
+        let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45, 0x67];
+        let m = NibbleSlice::new(other);
+
+        assert_eq!(n.common_prefix(&m), 4);
+        assert_eq!(m.common_prefix(&n), 4);
+        assert_eq!(n.mid(1).common_prefix(&m.mid(1)), 3);
+        assert_eq!(n.mid(1).common_prefix(&m.mid(2)), 0);
+        assert_eq!(n.common_prefix(&m.mid(4)), 6);
+        assert!(!n.starts_with(&m.mid(4)));
+        assert!(m.mid(4).starts_with(&n));
+    }
+
+    #[test]
+    fn compare() {
+        let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45];
+        let n = NibbleSlice::new(D);
+        let m = NibbleSlice::new(other);
+
+        assert!(n != m);
+        assert!(n > m);
+        assert!(m < n);
+
+        assert!(n == m.mid(4));
+        assert!(n >= m.mid(4));
+        assert!(n <= m.mid(4));
+    }
+
+    #[test]
+    fn nibble_indexing() {
+        let encoded = vec![32, 116, 101, 115, 116];
+        let n = NibbleSlice::from_encoded(&encoded).0;
+        let nibbles: Vec<u8> = (0..n.len()).map(|i| n.at(i)).collect();
+        assert_eq!(nibbles, vec![7, 4, 6, 5, 7, 3, 7, 4]);
+    }
+}
diff --git a/mock-enclave/src/skw-vm-store/src/trie/shard_tries.rs b/mock-enclave/src/skw-vm-store/src/trie/shard_tries.rs
new file mode 100644
index 0000000..6474924
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/trie/shard_tries.rs
@@ -0,0 +1,412 @@
+use std::rc::Rc;
+use std::sync::{Arc, RwLock};
+
+use borsh::BorshSerialize;
+use near_primitives::borsh::maybestd::collections::HashMap;
+use near_primitives::hash::CryptoHash;
+use near_primitives::shard_layout;
+use near_primitives::shard_layout::{ShardUId, ShardVersion};
+use near_primitives::trie_key::TrieKey;
+use near_primitives::types::{
+    NumShards, RawStateChange, RawStateChangesWithTrieKey, StateChangeCause, StateRoot,
+};
+
+use crate::db::{DBCol, DBOp, DBTransaction};
+use crate::trie::trie_storage::{TrieCache, TrieCachingStorage};
+use crate::trie::{TrieRefcountChange, POISONED_LOCK_ERR};
+use crate::{StorageError, Store, StoreUpdate, Trie, TrieChanges, TrieUpdate};
+
+struct ShardTriesInner {
+    store: Arc<Store>,
+    /// Cache reserved for client actor to use
+    caches: RwLock<HashMap<ShardUId, TrieCache>>,
+    /// Cache for readers.
+    view_caches: RwLock<HashMap<ShardUId, TrieCache>>,
+}
+
+#[derive(Clone)]
+pub struct ShardTries(Arc<ShardTriesInner>);
+
+impl ShardTries {
+    fn get_new_cache(shards: &[ShardUId]) -> HashMap<ShardUId, TrieCache> {
+        shards.iter().map(|&shard_id| (shard_id, TrieCache::new())).collect()
+    }
+
+    pub fn new(store: Arc<Store>, shard_version: ShardVersion, num_shards: NumShards) -> Self {
+        assert_ne!(num_shards, 0);
+        let shards: Vec<_> = (0..num_shards)
+            .map(|shard_id| ShardUId { version: shard_version, shard_id: shard_id as u32 })
+            .collect();
+        ShardTries(Arc::new(ShardTriesInner {
+            store,
+            caches: RwLock::new(Self::get_new_cache(&shards)),
+            view_caches: RwLock::new(Self::get_new_cache(&shards)),
+        }))
+    }
+
+    pub fn is_same(&self, other: &Self) -> bool {
+        Arc::ptr_eq(&self.0, &other.0)
+    }
+
+    pub fn new_trie_update(&self, shard_uid: ShardUId, state_root: CryptoHash) -> TrieUpdate {
+        TrieUpdate::new(Rc::new(self.get_trie_for_shard(shard_uid)), state_root)
+    }
+
+    pub fn new_trie_update_view(&self, shard_uid: ShardUId, state_root: CryptoHash) -> TrieUpdate {
+        TrieUpdate::new(Rc::new(self.get_view_trie_for_shard(shard_uid)), state_root)
+    }
+
+    fn get_trie_for_shard_internal(&self, shard_uid: ShardUId, is_view: bool) -> Trie {
+        let caches_to_use = if is_view { &self.0.view_caches } else { &self.0.caches };
+        let cache = {
+            let mut caches = caches_to_use.write().expect(POISONED_LOCK_ERR);
+            caches.entry(shard_uid).or_insert_with(TrieCache::new).clone()
+        };
+        let store = Box::new(TrieCachingStorage::new(self.0.store.clone(), cache, shard_uid));
+        Trie::new(store, shard_uid)
+    }
+
+    pub fn get_trie_for_shard(&self, shard_uid: ShardUId) -> Trie {
+        self.get_trie_for_shard_internal(shard_uid, false)
+    }
+
+    pub fn get_view_trie_for_shard(&self, shard_uid: ShardUId) -> Trie {
+        self.get_trie_for_shard_internal(shard_uid, true)
+    }
+
+    pub fn get_store(&self) -> Arc<Store> {
+        self.0.store.clone()
+    }
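These constructors compose as below; a minimal usage sketch, assuming some `store: Arc<Store>` and a known `state_root` (both hypothetical here):

    // One ShardTries serves every shard of a layout; Trie instances for the
    // same shard share that shard's node cache.
    let tries = ShardTries::new(store, 0 /* shard_version */, 4 /* num_shards */);
    let shard_uid = ShardUId { version: 0, shard_id: 0 };
    let trie = tries.get_trie_for_shard(shard_uid);                 // cached reads
    let trie_update = tries.new_trie_update(shard_uid, state_root); // buffered writes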
+    pub fn update_cache(&self, transaction: &DBTransaction) -> std::io::Result<()> {
+        let mut caches = self.0.caches.write().expect(POISONED_LOCK_ERR);
+        let mut shards = HashMap::new();
+        for op in &transaction.ops {
+            match op {
+                DBOp::UpdateRefcount { col, ref key, ref value } if *col == DBCol::ColState => {
+                    let (shard_uid, hash) =
+                        TrieCachingStorage::get_shard_uid_and_hash_from_key(key)?;
+                    shards.entry(shard_uid).or_insert(vec![]).push((hash, Some(value)));
+                }
+                DBOp::Insert { col, .. } if *col == DBCol::ColState => unreachable!(),
+                DBOp::Delete { col, .. } if *col == DBCol::ColState => unreachable!(),
+                DBOp::DeleteAll { col } if *col == DBCol::ColState => {
+                    // Delete is possible in reset_data_pre_state_sync
+                    for (_, cache) in caches.iter() {
+                        cache.clear();
+                    }
+                }
+                _ => {}
+            }
+        }
+        for (shard_uid, ops) in shards {
+            let cache = caches.entry(shard_uid).or_insert_with(TrieCache::new).clone();
+            cache.update_cache(ops);
+        }
+        Ok(())
+    }
+
+    fn apply_deletions_inner(
+        deletions: &Vec<TrieRefcountChange>,
+        tries: ShardTries,
+        shard_uid: ShardUId,
+        store_update: &mut StoreUpdate,
+    ) -> Result<(), StorageError> {
+        store_update.tries = Some(tries);
+        for TrieRefcountChange { trie_node_or_value_hash, trie_node_or_value, rc } in
+            deletions.iter()
+        {
+            let key = TrieCachingStorage::get_key_from_shard_uid_and_hash(
+                shard_uid,
+                trie_node_or_value_hash,
+            );
+            store_update.update_refcount(
+                DBCol::ColState,
+                key.as_ref(),
+                trie_node_or_value,
+                -(*rc as i64),
+            );
+        }
+        Ok(())
+    }
+
+    fn apply_insertions_inner(
+        insertions: &Vec<TrieRefcountChange>,
+        tries: ShardTries,
+        shard_uid: ShardUId,
+        store_update: &mut StoreUpdate,
+    ) -> Result<(), StorageError> {
+        store_update.tries = Some(tries);
+        for TrieRefcountChange { trie_node_or_value_hash, trie_node_or_value, rc } in
+            insertions.iter()
+        {
+            let key = TrieCachingStorage::get_key_from_shard_uid_and_hash(
+                shard_uid,
+                trie_node_or_value_hash,
+            );
+            store_update.update_refcount(
+                DBCol::ColState,
+                key.as_ref(),
+                trie_node_or_value,
+                *rc as i64,
+            );
+        }
+        Ok(())
+    }
+
+    fn apply_all_inner(
+        trie_changes: &TrieChanges,
+        tries: ShardTries,
+        shard_uid: ShardUId,
+        apply_deletions: bool,
+    ) -> Result<(StoreUpdate, StateRoot), StorageError> {
+        let mut store_update = StoreUpdate::new_with_tries(tries.clone());
+        ShardTries::apply_insertions_inner(
+            &trie_changes.insertions,
+            tries.clone(),
+            shard_uid,
+            &mut store_update,
+        )?;
+        if apply_deletions {
+            ShardTries::apply_deletions_inner(
+                &trie_changes.deletions,
+                tries,
+                shard_uid,
+                &mut store_update,
+            )?;
+        }
+        Ok((store_update, trie_changes.new_root))
+    }
+
+    pub fn apply_insertions(
+        &self,
+        trie_changes: &TrieChanges,
+        shard_uid: ShardUId,
+        store_update: &mut StoreUpdate,
+    ) -> Result<(), StorageError> {
+        ShardTries::apply_insertions_inner(
+            &trie_changes.insertions,
+            self.clone(),
+            shard_uid,
+            store_update,
+        )
+    }
+
+    pub fn apply_deletions(
+        &self,
+        trie_changes: &TrieChanges,
+        shard_uid: ShardUId,
+        store_update: &mut StoreUpdate,
+    ) -> Result<(), StorageError> {
+        ShardTries::apply_deletions_inner(
+            &trie_changes.deletions,
+            self.clone(),
+            shard_uid,
+            store_update,
+        )
+    }
+
+    pub fn revert_insertions(
+        &self,
+        trie_changes: &TrieChanges,
+        shard_uid: ShardUId,
+        store_update: &mut StoreUpdate,
+    ) -> Result<(), StorageError> {
+        ShardTries::apply_deletions_inner(
+            &trie_changes.insertions,
+            self.clone(),
+            shard_uid,
+            store_update,
+        )
+    }
+
+    pub fn apply_all(
+        &self,
+        trie_changes: &TrieChanges,
+        shard_uid: ShardUId,
+    ) -> Result<(StoreUpdate, StateRoot), StorageError> {
+        ShardTries::apply_all_inner(trie_changes, self.clone(), shard_uid, true)
+    }
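The write path the tests in this patch exercise is `Trie::update` -> `apply_all` -> `StoreUpdate::commit`; a sketch under the assumption of a hypothetical `changes` list of key/value pairs and an existing `old_root`:

    // Compute TrieChanges against the old root, then persist them atomically.
    let trie = tries.get_trie_for_shard(shard_uid);
    let trie_changes = trie.update(&old_root, changes.into_iter())?;
    let (store_update, new_root) = tries.apply_all(&trie_changes, shard_uid)?;
    store_update.commit()?; // refcounts applied; new_root is now readable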
+    // apply_all with less memory overhead
+    pub fn apply_genesis(
+        &self,
+        trie_changes: TrieChanges,
+        shard_uid: ShardUId,
+    ) -> (StoreUpdate, StateRoot) {
+        assert_eq!(trie_changes.old_root, CryptoHash::default());
+        assert!(trie_changes.deletions.is_empty());
+        // Not new_with_tries on purpose
+        let mut store_update = StoreUpdate::new(self.get_store().storage.clone());
+        for TrieRefcountChange { trie_node_or_value_hash, trie_node_or_value, rc } in
+            trie_changes.insertions.into_iter()
+        {
+            let key = TrieCachingStorage::get_key_from_shard_uid_and_hash(
+                shard_uid,
+                &trie_node_or_value_hash,
+            );
+            store_update.update_refcount(
+                DBCol::ColState,
+                key.as_ref(),
+                &trie_node_or_value,
+                rc as i64,
+            );
+        }
+        (store_update, trie_changes.new_root)
+    }
+}
+
+pub struct WrappedTrieChanges {
+    tries: ShardTries,
+    shard_uid: ShardUId,
+    trie_changes: TrieChanges,
+    state_changes: Vec<RawStateChangesWithTrieKey>,
+    block_hash: CryptoHash,
+}
+
+impl WrappedTrieChanges {
+    pub fn new(
+        tries: ShardTries,
+        shard_uid: ShardUId,
+        trie_changes: TrieChanges,
+        state_changes: Vec<RawStateChangesWithTrieKey>,
+        block_hash: CryptoHash,
+    ) -> Self {
+        WrappedTrieChanges { tries, shard_uid, trie_changes, state_changes, block_hash }
+    }
+
+    pub fn state_changes(&self) -> &[RawStateChangesWithTrieKey] {
+        &self.state_changes
+    }
+
+    pub fn insertions_into(&self, store_update: &mut StoreUpdate) -> Result<(), StorageError> {
+        self.tries.apply_insertions(&self.trie_changes, self.shard_uid, store_update)
+    }
+
+    /// Save state changes into Store.
+    ///
+    /// NOTE: the changes are drained from `self`.
+    pub fn state_changes_into(&mut self, store_update: &mut StoreUpdate) {
+        for change_with_trie_key in self.state_changes.drain(..) {
+            assert!(
+                !change_with_trie_key.changes.iter().any(|RawStateChange { cause, .. }| matches!(
+                    cause,
+                    StateChangeCause::NotWritableToDisk
+                )),
+                "NotWritableToDisk changes must never be finalized."
+            );
+
+            assert!(
+                !change_with_trie_key.changes.iter().any(|RawStateChange { cause, .. }| matches!(
                    cause,
+                    StateChangeCause::Resharding
+                )),
+                "Resharding changes must never be finalized."
+            );
+
+            // Filtering trie keys for user facing RPC reporting.
+            // NOTE: If the trie key is not one of the account specific, it may cause key conflict
+            // when the node tracks multiple shards. See #2563.
+            match &change_with_trie_key.trie_key {
+                TrieKey::Account { .. }
+                | TrieKey::ContractCode { .. }
+                | TrieKey::AccessKey { .. }
+                | TrieKey::ContractData { .. } => {}
+                _ => continue,
+            };
+            let storage_key = KeyForStateChanges::new_from_trie_key(
+                &self.block_hash,
+                &change_with_trie_key.trie_key,
+            );
+            store_update.set(
+                DBCol::ColStateChanges,
+                storage_key.as_ref(),
+                &change_with_trie_key.try_to_vec().expect("Borsh serialize cannot fail"),
+            );
+        }
+    }
+
+    pub fn wrapped_into(
+        &mut self,
+        store_update: &mut StoreUpdate,
+    ) -> Result<(), Box<dyn std::error::Error>> {
+        self.insertions_into(store_update)?;
+        self.state_changes_into(store_update);
+        store_update.set_ser(
+            DBCol::ColTrieChanges,
+            &shard_layout::get_block_shard_uid(&self.block_hash, &self.shard_uid),
+            &self.trie_changes,
+        )?;
+        Ok(())
+    }
+}
+
+#[derive(derive_more::AsRef, derive_more::Into)]
+pub struct KeyForStateChanges(Vec<u8>);
+
+impl KeyForStateChanges {
+    fn estimate_prefix_len() -> usize {
+        std::mem::size_of::<CryptoHash>()
+    }
+
+    fn get_prefix_with_capacity(block_hash: &CryptoHash, reserve_capacity: usize) -> Self {
+        let mut key_prefix = Vec::with_capacity(Self::estimate_prefix_len() + reserve_capacity);
+        key_prefix.extend(block_hash.as_ref());
+        debug_assert_eq!(key_prefix.len(), Self::estimate_prefix_len());
+        Self(key_prefix)
+    }
+
+    pub fn get_prefix(block_hash: &CryptoHash) -> Self {
+        Self::get_prefix_with_capacity(block_hash, 0)
+    }
+
+    pub fn new(block_hash: &CryptoHash, raw_key: &[u8]) -> Self {
+        let mut key = Self::get_prefix_with_capacity(block_hash, raw_key.len());
+        key.0.extend(raw_key);
+        key
+    }
+
+    pub fn new_from_trie_key(block_hash: &CryptoHash, trie_key: &TrieKey) -> Self {
+        let mut key = Self::get_prefix_with_capacity(block_hash, trie_key.len());
+        key.0.extend(trie_key.to_vec());
+        key
+    }
+
+    pub fn find_iter<'a: 'b, 'b>(
+        &'a self,
+        store: &'b Store,
+    ) -> impl Iterator<Item = Result<RawStateChangesWithTrieKey, std::io::Error>> + 'b {
+        let prefix_len = Self::estimate_prefix_len();
+        debug_assert!(self.0.len() >= prefix_len);
+        store.iter_prefix_ser::<RawStateChangesWithTrieKey>(DBCol::ColStateChanges, &self.0).map(
+            move |change| {
+                // Split off the irrelevant part of the key, so only the original trie_key is left.
+                let (key, state_changes) = change?;
+                debug_assert!(key.starts_with(&self.0));
+                Ok(state_changes)
+            },
+        )
+    }
+
+    pub fn find_exact_iter<'a: 'b, 'b>(
+        &'a self,
+        store: &'b Store,
+    ) -> impl Iterator<Item = Result<RawStateChangesWithTrieKey, std::io::Error>> + 'b {
+        let prefix_len = Self::estimate_prefix_len();
+        let trie_key_len = self.0.len() - prefix_len;
+        self.find_iter(store).filter_map(move |change| {
+            let state_changes = match change {
+                Ok(change) => change,
+                error => {
+                    return Some(error);
+                }
+            };
+            if state_changes.trie_key.len() != trie_key_len {
+                None
+            } else {
+                debug_assert_eq!(&state_changes.trie_key.to_vec()[..], &self.0[prefix_len..]);
+                Some(Ok(state_changes))
+            }
+        })
+    }
+}
diff --git a/mock-enclave/src/skw-vm-store/src/trie/split_state.rs b/mock-enclave/src/skw-vm-store/src/trie/split_state.rs
new file mode 100644
index 0000000..dae03b0
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/trie/split_state.rs
@@ -0,0 +1,813 @@
+use crate::trie::iterator::TrieItem;
+use crate::{
+    get, get_delayed_receipt_indices, set, ShardTries, StoreUpdate, Trie, TrieChanges, TrieUpdate,
+};
+use borsh::BorshDeserialize;
+use bytesize::ByteSize;
+use near_primitives::account::id::AccountId;
+use near_primitives::errors::StorageError;
+use near_primitives::receipt::Receipt;
+use near_primitives::shard_layout::ShardUId;
+use near_primitives::trie_key::trie_key_parsers::parse_account_id_from_raw_key;
+use near_primitives::trie_key::TrieKey;
+use near_primitives::types::{
+    ConsolidatedStateChange, StateChangeCause, StateChangesForSplitStates, StateRoot,
+};
+use std::collections::HashMap;
+
+impl Trie {
+    /// Computes the set of trie items (nodes with keys and values) for a state part.
+    ///
+    /// # Panics
+    /// storage must be a TrieCachingStorage
+    /// part_id must be in [0..num_parts)
+    ///
+    /// # Errors
+    /// StorageError if the storage is corrupted
+    pub fn get_trie_items_for_part(
+        &self,
+        part_id: u64,
+        num_parts: u64,
+        state_root: &StateRoot,
+    ) -> Result<Vec<TrieItem>, StorageError> {
+        assert!(part_id < num_parts);
+        assert!(self.storage.as_caching_storage().is_some());
+
+        let path_begin = self.find_path_for_part_boundary(state_root, part_id, num_parts)?;
+        let path_end = self.find_path_for_part_boundary(state_root, part_id + 1, num_parts)?;
+        self.iter(state_root)?.get_trie_items(&path_begin, &path_end)
+    }
+}
+
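Parts partition the trie items by key range, so concatenating every part reproduces the full item set; `test_get_trie_items_for_part` later in this file checks exactly that. A consumption sketch, with `trie`, `state_root`, and the part count hypothetical:

    let num_parts = 8u64;
    let mut all_items = vec![];
    for part_id in 0..num_parts {
        // Part boundaries are chosen so sizes are balanced by the memory
        // usage recorded in the trie nodes.
        all_items.extend(trie.get_trie_items_for_part(part_id, num_parts, &state_root)?);
    }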
+impl ShardTries {
+    /// Applies `changes` to the split states and returns the generated TrieChanges
+    /// for all split states.
+    /// Note that this function is different from `add_values_to_split_states`:
+    /// this function is used for applying updates to split states when processing blocks,
+    /// while `add_values_to_split_states` is used to generate the initial states for shards
+    /// split from the original parent shard.
+    pub fn apply_state_changes_to_split_states<'a>(
+        &self,
+        state_roots: &HashMap<ShardUId, StateRoot>,
+        changes: StateChangesForSplitStates,
+        account_id_to_shard_id: &(dyn Fn(&AccountId) -> ShardUId + 'a),
+    ) -> Result<HashMap<ShardUId, TrieChanges>, StorageError> {
+        let mut trie_updates: HashMap<_, _> = self.get_trie_updates(state_roots);
+        let mut insert_receipts = Vec::new();
+        for ConsolidatedStateChange { trie_key, value } in changes.changes {
+            match &trie_key {
+                TrieKey::DelayedReceiptIndices => {}
+                TrieKey::DelayedReceipt { index } => match value {
+                    Some(value) => {
+                        let receipt = Receipt::try_from_slice(&value).map_err(|err| {
+                            StorageError::StorageInconsistentState(format!(
+                                "invalid delayed receipt {:?}, err: {}",
+                                value,
+                                err.to_string(),
+                            ))
+                        })?;
+                        insert_receipts.push((*index, receipt));
+                    }
+                    None => {}
+                },
+                TrieKey::Account { account_id }
+                | TrieKey::ContractCode { account_id }
+                | TrieKey::AccessKey { account_id, .. }
+                | TrieKey::ReceivedData { receiver_id: account_id, .. }
+                | TrieKey::PostponedReceiptId { receiver_id: account_id, .. }
+                | TrieKey::PendingDataCount { receiver_id: account_id, .. }
+                | TrieKey::PostponedReceipt { receiver_id: account_id, .. }
+                | TrieKey::ContractData { account_id, .. } => {
+                    let new_shard_uid = account_id_to_shard_id(account_id);
+                    // We can safely unwrap here because the caller of this function
+                    // guarantees that trie_updates contains all shard_uids for the new shards.
+                    let trie_update = trie_updates.get_mut(&new_shard_uid).unwrap();
+                    match value {
+                        Some(value) => trie_update.set(trie_key, value),
+                        None => trie_update.remove(trie_key),
+                    }
+                }
+            }
+        }
+        for (_, update) in trie_updates.iter_mut() {
+            update.commit(StateChangeCause::Resharding);
+        }
+
+        insert_receipts.sort_by_key(|it| it.0);
+
+        let insert_receipts: Vec<_> =
+            insert_receipts.into_iter().map(|(_, receipt)| receipt).collect();
+
+        apply_delayed_receipts_to_split_states_impl(
+            &mut trie_updates,
+            &insert_receipts,
+            &changes.processed_delayed_receipts,
+            account_id_to_shard_id,
+        )?;
+
+        let mut trie_changes_map = HashMap::new();
+        for (shard_uid, update) in trie_updates {
+            let (trie_changes, _) = update.finalize()?;
+            trie_changes_map.insert(shard_uid, trie_changes);
+        }
+        Ok(trie_changes_map)
+    }
+
+    /// Adds `values` (key-value pairs of items stored in states) to build states for new shards.
+    /// `state_roots` contains state roots for the new shards.
+    /// The caller must guarantee that `state_roots` contains all shard_uids
+    /// that `key_to_shard_id` may return.
+    /// Ignores changes on DelayedReceipts or DelayedReceiptIndices.
+    /// Returns `store_update` and the new state_roots for split states.
+    pub fn add_values_to_split_states<'a>(
+        &self,
+        state_roots: &HashMap<ShardUId, StateRoot>,
+        values: Vec<(Vec<u8>, Option<Vec<u8>>)>,
+        account_id_to_shard_id: &(dyn Fn(&AccountId) -> ShardUId + 'a),
+    ) -> Result<(StoreUpdate, HashMap<ShardUId, StateRoot>), StorageError> {
+        self.add_values_to_split_states_impl(state_roots, values, &|raw_key| {
+            // Changes on DelayedReceipts or DelayedReceiptIndices are excluded here
+            // because delayed receipts cannot be migrated part by part. They have to be
+            // reconstructed in the new states after all DelayedReceipts are ready in the
+            // original shard.
+            if let Some(account_id) = parse_account_id_from_raw_key(raw_key).map_err(|e| {
+                let err = format!("error parsing account id from trie key {:?}: {:?}", raw_key, e);
+                StorageError::StorageInconsistentState(err)
+            })?
+            {
+                let new_shard_uid = account_id_to_shard_id(&account_id);
+                Ok(Some(new_shard_uid))
+            } else {
+                Ok(None)
+            }
+        })
+    }
+
+    fn add_values_to_split_states_impl<'a>(
+        &self,
+        state_roots: &HashMap<ShardUId, StateRoot>,
+        values: Vec<(Vec<u8>, Option<Vec<u8>>)>,
+        key_to_shard_id: &(dyn Fn(&[u8]) -> Result<Option<ShardUId>, StorageError> + 'a),
+    ) -> Result<(StoreUpdate, HashMap<ShardUId, StateRoot>), StorageError> {
+        let mut changes_by_shard: HashMap<_, Vec<_>> = HashMap::new();
+        for (raw_key, value) in values.into_iter() {
+            if let Some(new_shard_uid) = key_to_shard_id(&raw_key)? {
+                changes_by_shard.entry(new_shard_uid).or_default().push((raw_key, value));
+            }
+        }
+        let mut new_state_roots = state_roots.clone();
+        let mut store_update = StoreUpdate::new_with_tries(self.clone());
+        for (shard_uid, changes) in changes_by_shard {
+            let trie = self.get_trie_for_shard(shard_uid);
+            // We assume that state_roots contains shard_uid; the caller of this method
+            // guarantees that.
+            let trie_changes = trie.update(&state_roots[&shard_uid], changes.into_iter())?;
+            let (update, state_root) = self.apply_all(&trie_changes, shard_uid)?;
+            new_state_roots.insert(shard_uid, state_root);
+            store_update.merge(update);
+        }
+        Ok((store_update, new_state_roots))
+    }
+
+    fn get_trie_updates(
+        &self,
+        state_roots: &HashMap<ShardUId, StateRoot>,
+    ) -> HashMap<ShardUId, TrieUpdate> {
+        state_roots
+            .iter()
+            .map(|(shard_uid, state_root)| {
+                (*shard_uid, self.new_trie_update(*shard_uid, *state_root))
+            })
+            .collect()
+    }
+
+    pub fn apply_delayed_receipts_to_split_states<'a>(
+        &self,
+        state_roots: &HashMap<ShardUId, StateRoot>,
+        receipts: &[Receipt],
+        account_id_to_shard_id: &(dyn Fn(&AccountId) -> ShardUId + 'a),
+    ) -> Result<(StoreUpdate, HashMap<ShardUId, StateRoot>), StorageError> {
+        let mut trie_updates: HashMap<_, _> = self.get_trie_updates(state_roots);
+        apply_delayed_receipts_to_split_states_impl(
+            &mut trie_updates,
+            receipts,
+            &[],
+            account_id_to_shard_id,
+        )?;
+        self.finalize_and_apply_trie_updates(trie_updates)
+    }
+
+    fn finalize_and_apply_trie_updates(
+        &self,
+        updates: HashMap<ShardUId, TrieUpdate>,
+    ) -> Result<(StoreUpdate, HashMap<ShardUId, StateRoot>), StorageError> {
+        let mut new_state_roots = HashMap::new();
+        let mut merged_store_update = StoreUpdate::new_with_tries(self.clone());
+        for (shard_uid, update) in updates {
+            let (trie_changes, _) = update.finalize()?;
+            let (store_update, state_root) = self.apply_all(&trie_changes, shard_uid)?;
+            new_state_roots.insert(shard_uid, state_root);
+            merged_store_update.merge(store_update);
+        }
+        Ok((merged_store_update, new_state_roots))
+    }
+}
+
+fn apply_delayed_receipts_to_split_states_impl<'a>(
+    trie_updates: &mut HashMap<ShardUId, TrieUpdate>,
+    insert_receipts: &[Receipt],
+    delete_receipts: &[Receipt],
+    account_id_to_shard_id: &(dyn Fn(&AccountId) -> ShardUId + 'a),
+) -> Result<(), StorageError> {
+    let mut delayed_receipts_indices_by_shard = HashMap::new();
+    for (shard_uid, update) in trie_updates.iter() {
+        delayed_receipts_indices_by_shard.insert(*shard_uid, get_delayed_receipt_indices(update)?);
+    }
+
+    for receipt in insert_receipts {
+        let new_shard_uid: ShardUId = account_id_to_shard_id(&receipt.receiver_id);
+        if !trie_updates.contains_key(&new_shard_uid) {
+            let err = format!(
+                "Account {} is in new shard {:?} but state_roots only contains {:?}",
+                receipt.receiver_id,
+                new_shard_uid,
+                trie_updates.keys(),
+            );
+            return Err(StorageError::StorageInconsistentState(err));
+        }
+        // We already checked that new_shard_uid is in trie_updates and
+        // delayed_receipts_indices, so we can safely unwrap here.
+        let mut delayed_receipts_indices =
+            delayed_receipts_indices_by_shard.get_mut(&new_shard_uid).unwrap();
+        set(
+            trie_updates.get_mut(&new_shard_uid).unwrap(),
+            TrieKey::DelayedReceipt { index: delayed_receipts_indices.next_available_index },
+            receipt,
+        );
+        delayed_receipts_indices.next_available_index =
+            delayed_receipts_indices.next_available_index.checked_add(1).ok_or_else(|| {
+                StorageError::StorageInconsistentState(
+                    "Next available index for delayed receipt exceeded the integer limit"
+                        .to_string(),
+                )
+            })?;
+    }
+
+    for receipt in delete_receipts {
+        let new_shard_uid: ShardUId = account_id_to_shard_id(&receipt.receiver_id);
+        if !trie_updates.contains_key(&new_shard_uid) {
+            let err = format!(
+                "Account {} is in new shard {:?} but state_roots only contains {:?}",
+                receipt.receiver_id,
+                new_shard_uid,
+                trie_updates.keys(),
+            );
+            return Err(StorageError::StorageInconsistentState(err));
+        }
+        let mut delayed_receipts_indices =
+            delayed_receipts_indices_by_shard.get_mut(&new_shard_uid).unwrap();
+
+        let trie_update = trie_updates.get_mut(&new_shard_uid).unwrap();
+        let trie_key = TrieKey::DelayedReceipt { index: delayed_receipts_indices.first_index };
+
+        let stored_receipt = get::<Receipt>(trie_update, &trie_key)?
+            .expect("removed receipt does not exist in new state");
+        // Check that the receipt to remove is at the front of the delayed receipt queue.
+        assert_eq!(&stored_receipt, receipt);
+        trie_update.remove(trie_key);
+        delayed_receipts_indices.first_index += 1;
+    }
+
+    // Commit the trie_updates and update state_roots.
+    for (shard_uid, trie_update) in trie_updates {
+        set(
+            trie_update,
+            TrieKey::DelayedReceiptIndices,
+            delayed_receipts_indices_by_shard.get(shard_uid).unwrap(),
+        );
+        trie_update.commit(StateChangeCause::Resharding);
+    }
+    Ok(())
+}
+
+/// Retrieves delayed receipts starting with `start_index` until `memory_limit` is hit.
+/// Returns None if there are no delayed receipts with index >= start_index.
+pub fn get_delayed_receipts(
+    state_update: &TrieUpdate,
+    start_index: Option<u64>,
+    memory_limit: ByteSize,
+) -> Result<Option<(u64, Vec<Receipt>)>, StorageError> {
+    let mut delayed_receipt_indices = get_delayed_receipt_indices(state_update)?;
+    if let Some(start_index) = start_index {
+        if start_index >= delayed_receipt_indices.next_available_index {
+            return Ok(None);
+        }
+        delayed_receipt_indices.first_index = start_index.max(delayed_receipt_indices.first_index);
+    }
+    let mut used_memory = 0;
+    let mut receipts = vec![];
+
+    while used_memory < memory_limit.as_u64()
+        && delayed_receipt_indices.first_index < delayed_receipt_indices.next_available_index
+    {
+        let key = TrieKey::DelayedReceipt { index: delayed_receipt_indices.first_index };
+        let data = state_update.get(&key)?.ok_or_else(|| {
+            StorageError::StorageInconsistentState(format!(
+                "Delayed receipt #{} should be in the state",
+                delayed_receipt_indices.first_index
+            ))
+        })?;
+        used_memory += data.len() as u64;
+        delayed_receipt_indices.first_index += 1;
+
+        let receipt = Receipt::try_from_slice(&data).map_err(|_| {
+            StorageError::StorageInconsistentState("Failed to deserialize".to_string())
+        })?;
+        receipts.push(receipt);
+    }
+    Ok(Some((delayed_receipt_indices.first_index, receipts)))
+}
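`get_delayed_receipts` is a paging primitive: as long as the queue is non-empty it returns at least one receipt per call (so progress is made even when a single receipt exceeds `memory_limit`), together with the index to resume from, and `None` once the queue is drained. A consumption sketch, with `migrate` standing in for a hypothetical per-batch consumer:

    let mut start_index = None;
    while let Some((next_index, receipts)) =
        get_delayed_receipts(&state_update, start_index, ByteSize::b(100_000))?
    {
        migrate(receipts);              // hypothetical consumer of one batch
        start_index = Some(next_index); // resume after the last receipt returned
    }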
+#[cfg(test)]
+mod tests {
+    use crate::split_state::{apply_delayed_receipts_to_split_states_impl, get_delayed_receipts};
+    use crate::test_utils::{
+        create_tries, gen_changes, gen_larger_changes, gen_receipts, gen_unique_accounts,
+        simplify_changes, test_populate_trie,
+    };
+
+    use crate::{get, get_delayed_receipt_indices, set, set_account, ShardTries, ShardUId, Trie};
+    use near_primitives::account::id::AccountId;
+    use near_primitives::account::Account;
+    use near_primitives::borsh::BorshSerialize;
+    use near_primitives::hash::{hash, CryptoHash};
+    use near_primitives::receipt::{DelayedReceiptIndices, Receipt};
+    use near_primitives::trie_key::trie_key_parsers::parse_account_id_from_raw_key;
+    use near_primitives::trie_key::TrieKey;
+    use near_primitives::types::{
+        NumShards, StateChangeCause, StateChangesForSplitStates, StateRoot,
+    };
+    use rand::seq::SliceRandom;
+    use rand::Rng;
+    use std::collections::HashMap;
+
+    fn get_all_delayed_receipts(
+        tries: &ShardTries,
+        shard_uid: &ShardUId,
+        state_root: &StateRoot,
+    ) -> Vec<Receipt> {
+        let state_update = &tries.new_trie_update(*shard_uid, *state_root);
+        let mut delayed_receipt_indices = get_delayed_receipt_indices(state_update).unwrap();
+
+        let mut receipts = vec![];
+        while delayed_receipt_indices.first_index < delayed_receipt_indices.next_available_index {
+            let key = TrieKey::DelayedReceipt { index: delayed_receipt_indices.first_index };
+            let receipt = get(state_update, &key).unwrap().unwrap();
+            delayed_receipt_indices.first_index += 1;
+            receipts.push(receipt);
+        }
+        receipts
+    }
+
+    fn get_trie_nodes_except_delayed_receipts(
+        tries: &ShardTries,
+        shard_uid: &ShardUId,
+        state_root: &StateRoot,
+    ) -> Vec<(Vec<u8>, Vec<u8>)> {
+        let trie = tries.get_trie_for_shard(*shard_uid);
+        trie.iter(state_root)
+            .unwrap()
+            .map(Result::unwrap)
+            .filter(|(key, _)| parse_account_id_from_raw_key(key).unwrap().is_some())
+            .collect()
+    }
+
+    fn compare_state_and_split_states<'a>(
+        tries: &ShardTries,
+        state_root: &StateRoot,
+        state_roots: &HashMap<ShardUId, StateRoot>,
+        account_id_to_shard_id: &(dyn Fn(&AccountId) -> ShardUId + 'a),
+    ) {
+        // Check that the split tries, combined, equal the original trie.
+        let trie_items =
+            get_trie_nodes_except_delayed_receipts(tries, &ShardUId::single_shard(), state_root);
+        let trie_items_by_shard: HashMap<_, _> = state_roots
+            .iter()
+            .map(|(&shard_uid, state_root)| {
+                (shard_uid, get_trie_nodes_except_delayed_receipts(tries, &shard_uid, state_root))
+            })
+            .collect();
+
+        let mut expected_trie_items_by_shard: HashMap<_, _> =
+            state_roots.iter().map(|(&shard_uid, _)| (shard_uid, vec![])).collect();
+        for item in trie_items {
+            let account_id = parse_account_id_from_raw_key(&item.0).unwrap().unwrap();
+            let shard_uid: ShardUId = account_id_to_shard_id(&account_id);
+            expected_trie_items_by_shard.get_mut(&shard_uid).unwrap().push(item);
+        }
+        assert_eq!(trie_items_by_shard, expected_trie_items_by_shard);
+
+        let receipts_from_split_states: HashMap<_, _> = state_roots
+            .iter()
+            .map(|(&shard_uid, state_root)| {
+                let receipts = get_all_delayed_receipts(tries, &shard_uid, state_root);
+                (shard_uid, receipts)
+            })
+            .collect();
+
+        let mut expected_receipts_by_shard: HashMap<_, Vec<_>> =
+            state_roots.iter().map(|(&shard_uid, _)| (shard_uid, vec![])).collect();
+        for receipt in get_all_delayed_receipts(tries, &ShardUId::single_shard(), state_root) {
+            let shard_uid = account_id_to_shard_id(&receipt.receiver_id);
+            expected_receipts_by_shard.get_mut(&shard_uid).unwrap().push(receipt.clone());
+        }
+        assert_eq!(expected_receipts_by_shard, receipts_from_split_states);
+    }
+
+    #[test]
+    fn test_get_trie_items_for_part() {
+        let mut rng = rand::thread_rng();
+        let tries = create_tries();
+        let num_parts = rng.gen_range(5, 10);
+
+        let changes = gen_larger_changes(&mut rng, 1000);
+        let changes = simplify_changes(&changes);
+        let state_root = test_populate_trie(
+            &tries,
+            &CryptoHash::default(),
+            ShardUId::single_shard(),
+            changes.clone(),
+        );
+        let mut expected_trie_items: Vec<_> =
+            changes.into_iter().map(|(key, value)| (key, value.unwrap())).collect();
+        expected_trie_items.sort();
+
+        let trie = tries.get_trie_for_shard(ShardUId::single_shard());
+        let total_trie_items = trie.get_trie_items_for_part(0, 1, &state_root).unwrap();
+        assert_eq!(expected_trie_items, total_trie_items);
+
+        let mut combined_trie_items = vec![];
+        for part_id in 0..num_parts {
+            let trie_items = trie.get_trie_items_for_part(part_id, num_parts, &state_root).unwrap();
+            combined_trie_items.extend_from_slice(&trie_items);
+            // check that items are split relatively evenly across all parts
+            assert!(
+                trie_items.len()
+                    >= (total_trie_items.len() / num_parts as usize / 2)
+                        .checked_sub(10)
+                        .unwrap_or_default()
+                    && trie_items.len() <= total_trie_items.len() / num_parts as usize * 2 + 10,
+                "part length {} avg length {}",
+                trie_items.len(),
+                total_trie_items.len() / num_parts as usize
+            );
+        }
+        assert_eq!(expected_trie_items, combined_trie_items);
+    }
+
+    #[test]
+    fn test_add_values_to_split_states() {
+        let mut rng = rand::thread_rng();
+
+        for _ in 0..20 {
+            let tries = create_tries();
+            // add 4 new shards for version 1
+            let num_shards = 4;
+            let mut state_root = Trie::empty_root();
+            let mut state_roots: HashMap<_, _> = (0..num_shards)
+                .map(|x| (ShardUId { version: 1, shard_id: x as u32 }, CryptoHash::default()))
+                .collect();
+            for _ in 0..10 {
+                let trie = tries.get_trie_for_shard(ShardUId::single_shard());
+                let changes = gen_changes(&mut rng, 100);
+                state_root = test_populate_trie(
+                    &tries,
+                    &state_root,
+                    ShardUId::single_shard(),
+                    changes.clone(),
+                );
+
+                let (store_update, new_state_roots) = tries
+                    .add_values_to_split_states_impl(&state_roots, changes, &|raw_key| {
+                        Ok(Some(ShardUId {
+                            version: 1,
+                            shard_id: (hash(raw_key).0[0] as NumShards % num_shards) as u32,
+                        }))
+                    })
+                    .unwrap();
+                store_update.commit().unwrap();
+                state_roots = new_state_roots;
+
+                // Check that the split tries, combined, equal the original trie.
+                let trie_items: HashMap<_, _> =
+                    trie.iter(&state_root).unwrap().map(Result::unwrap).collect();
+                let mut combined_trie_items: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
+                state_roots.iter().for_each(|(shard_uid, state_root)| {
+                    let trie = tries.get_view_trie_for_shard(*shard_uid);
+                    combined_trie_items.extend(trie.iter(state_root).unwrap().map(Result::unwrap));
+                });
+                assert_eq!(trie_items, combined_trie_items);
+            }
+        }
+    }
+
+    #[test]
+    fn test_get_delayed_receipts() {
+        let mut rng = rand::thread_rng();
+        for _ in 0..20 {
+            let memory_limit = bytesize::ByteSize::b(rng.gen_range(200, 1000));
+            let all_receipts = gen_receipts(&mut rng, 200);
+
+            // push receipt to trie
+            let tries = create_tries();
+            let mut trie_update =
+                tries.new_trie_update(ShardUId::single_shard(), StateRoot::default());
+            let mut delayed_receipt_indices = DelayedReceiptIndices::default();
+
+            for (i, receipt) in all_receipts.iter().enumerate() {
+                set(&mut trie_update, TrieKey::DelayedReceipt { index: i as u64 }, receipt);
+            }
+            delayed_receipt_indices.next_available_index = all_receipts.len() as u64;
+            set(&mut trie_update, TrieKey::DelayedReceiptIndices, &delayed_receipt_indices);
+            trie_update.commit(StateChangeCause::Resharding);
+            let (trie_changes, _) = trie_update.finalize().unwrap();
+            let (store_update, state_root) =
+                tries.apply_all(&trie_changes, ShardUId::single_shard()).unwrap();
+            store_update.commit().unwrap();
+
+            assert_eq!(
+                all_receipts,
+                get_all_delayed_receipts(&tries, &ShardUId::single_shard(), &state_root)
+            );
+            let mut start_index = 0;
+
+            let trie_update = tries.new_trie_update(ShardUId::single_shard(), state_root);
+            while let Some((next_index, receipts)) =
+                get_delayed_receipts(&trie_update, Some(start_index), memory_limit).unwrap()
+            {
+                assert_eq!(receipts, all_receipts[start_index as usize..next_index as usize]);
+                start_index = next_index;
+
+                let total_memory_use = receipts
+                    .iter()
+                    .fold(0_u64, |sum, receipt| sum + receipt.try_to_vec().unwrap().len() as u64);
+                let memory_use_without_last_receipt = receipts[..receipts.len() - 1]
+                    .iter()
+                    .fold(0_u64, |sum, receipt| sum + receipt.try_to_vec().unwrap().len() as u64);
+
+                assert!(
+                    total_memory_use >= memory_limit.as_u64()
+                        || next_index == all_receipts.len() as u64
+                );
+                assert!(memory_use_without_last_receipt < memory_limit.as_u64());
+            }
+        }
+    }
+
+    fn test_apply_delayed_receipts<'a>(
+        tries: &ShardTries,
+        new_receipts: &[Receipt],
+        delete_receipts: &[Receipt],
+        expected_all_receipts: &[Receipt],
+        state_roots: HashMap<ShardUId, StateRoot>,
+        account_id_to_shard_id: &(dyn Fn(&AccountId) -> ShardUId + 'a),
+    ) -> HashMap<ShardUId, StateRoot> {
+        let mut trie_updates: HashMap<_, _> = tries.get_trie_updates(&state_roots);
+        apply_delayed_receipts_to_split_states_impl(
+            &mut trie_updates,
+            new_receipts,
+            delete_receipts,
+            account_id_to_shard_id,
+        )
+        .unwrap();
+        let (state_update, new_state_roots) =
+            tries.finalize_and_apply_trie_updates(trie_updates).unwrap();
+        state_update.commit().unwrap();
+
+        let receipts_by_shard: HashMap<_, _> = new_state_roots
+            .iter()
+            .map(|(shard_uid, state_root)| {
+                let receipts = get_all_delayed_receipts(tries, shard_uid, state_root);
+                (shard_uid, receipts)
+            })
+            .collect();
+
+        let mut expected_receipts_by_shard: HashMap<_, _> =
+            state_roots.iter().map(|(shard_uid, _)| (shard_uid, vec![])).collect();
+        for receipt in expected_all_receipts {
+            let shard_uid = account_id_to_shard_id(&receipt.receiver_id);
+            expected_receipts_by_shard.get_mut(&shard_uid).unwrap().push(receipt.clone());
+        }
+        assert_eq!(expected_receipts_by_shard, receipts_by_shard);
+
+        new_state_roots
+    }
+
+    #[test]
+    fn test_apply_delayed_receipts_to_new_states() {
+        let mut rng = rand::thread_rng();
+
+        let tries = create_tries();
+        let num_shards = 4;
+
+        for _ in 0..10 {
+            let mut state_roots: HashMap<_, _> = (0..num_shards)
+                .map(|x| (ShardUId { version: 1, shard_id: x as u32 }, CryptoHash::default()))
+                .collect();
+            let mut all_receipts = vec![];
+            let mut start_index = 0;
+            for _ in 0..10 {
+                let receipts = gen_receipts(&mut rng, 100);
+                let new_start_index = rng.gen_range(start_index, all_receipts.len() + 1);
+
+                all_receipts.extend_from_slice(&receipts);
+                state_roots = test_apply_delayed_receipts(
+                    &tries,
+                    &receipts,
+                    &all_receipts[start_index..new_start_index],
+                    &all_receipts[new_start_index..],
+                    state_roots,
+                    &|account_id| ShardUId {
+                        shard_id: (hash(account_id.as_ref().as_bytes()).0[0] as NumShards
+                            % num_shards) as u32,
+                        version: 1,
+                    },
+                );
+                start_index = new_start_index;
+            }
+        }
+    }
+
+    fn test_split_and_update_state_impl(rng: &mut impl Rng) {
+        let tries = create_tries();
+        // add accounts and receipts to state
+        let mut account_ids = gen_unique_accounts(rng, 100);
+        let mut trie_update =
+            tries.new_trie_update(ShardUId::single_shard(), CryptoHash::default());
+        for account_id in account_ids.iter() {
+            set_account(
+                &mut trie_update,
+                account_id.clone(),
+                &Account::new(0, 0, CryptoHash::default(), 0),
+            );
+        }
+        let receipts = gen_receipts(rng, 100);
+        // add accounts and receipts to the original shard
+        let mut state_root = {
+            for (index, receipt) in receipts.iter().enumerate() {
+                set(&mut trie_update, TrieKey::DelayedReceipt { index: index as u64 }, receipt);
+            }
+            set(
+                &mut trie_update,
+                TrieKey::DelayedReceiptIndices,
+                &DelayedReceiptIndices {
+                    first_index: 0,
+                    next_available_index: receipts.len() as u64,
+                },
+            );
+            trie_update.commit(StateChangeCause::Resharding);
+            let (trie_changes, _) = trie_update.finalize().unwrap();
+            let (store_update, state_root) =
+                tries.apply_all(&trie_changes, ShardUId::single_shard()).unwrap();
+            store_update.commit().unwrap();
+            state_root
+        };
+
+        let num_shards = 4;
+        let account_id_to_shard_id = &|account_id: &AccountId| ShardUId {
+            shard_id: (hash(account_id.as_ref().as_bytes()).0[0] as NumShards % num_shards) as u32,
+            version: 1,
+        };
+
+        // add accounts and receipts to the split shards
+        let mut split_state_roots = {
+            let trie_items = tries
+                .get_view_trie_for_shard(ShardUId::single_shard())
+                .get_trie_items_for_part(0, 1, &state_root)
+                .unwrap();
+            let split_state_roots: HashMap<_, _> = (0..num_shards)
+                .map(|shard_id| {
+                    (ShardUId { version: 1, shard_id: shard_id as u32 }, CryptoHash::default())
+                })
+                .collect();
+            let (store_update, split_state_roots) = tries
+                .add_values_to_split_states(
+                    &split_state_roots,
+                    trie_items.into_iter().map(|(key, value)| (key, Some(value))).collect(),
+                    account_id_to_shard_id,
+                )
+                .unwrap();
+            store_update.commit().unwrap();
+            let (store_update, split_state_roots) = tries
+                .apply_delayed_receipts_to_split_states(
+                    &split_state_roots,
+                    &get_all_delayed_receipts(&tries, &ShardUId::single_shard(), &state_root),
+                    account_id_to_shard_id,
+                )
+                .unwrap();
+            store_update.commit().unwrap();
+            split_state_roots
+        };
+        compare_state_and_split_states(
+            &tries,
+            &state_root,
+            &split_state_roots,
+            account_id_to_shard_id,
+        );
+
+        // update the original shard
+        for _ in 0..10 {
+            // add accounts
+            let new_accounts = gen_unique_accounts(rng, 10);
+            let mut trie_update = tries.new_trie_update(ShardUId::single_shard(), state_root);
+            for account_id in new_accounts.iter() {
+                set_account(
+                    &mut trie_update,
+                    account_id.clone(),
+                    &Account::new(0, 0, CryptoHash::default(), 0),
+                );
+            }
+            // remove accounts
+            account_ids.shuffle(rng);
+            let remove_count = rng.gen_range(0, 10).min(account_ids.len());
+            for account_id in account_ids[0..remove_count].iter() {
+                trie_update.remove(TrieKey::Account { account_id: account_id.clone() });
+            }
+            account_ids = account_ids[remove_count..].to_vec();
+            account_ids.extend(new_accounts);
+
+            // remove delayed receipts
+            let mut delayed_receipt_indices = get_delayed_receipt_indices(&trie_update).unwrap();
+            println!(
+                "delayed receipt indices {} {}",
+                delayed_receipt_indices.first_index, delayed_receipt_indices.next_available_index
+            );
+            let next_first_index = rng.gen_range(
+                delayed_receipt_indices.first_index,
+                delayed_receipt_indices.next_available_index + 1,
+            );
+            let mut removed_receipts = vec![];
+            for index in delayed_receipt_indices.first_index..next_first_index {
+                let trie_key = TrieKey::DelayedReceipt { index };
+                removed_receipts.push(get::<Receipt>(&trie_update, &trie_key).unwrap().unwrap());
+                trie_update.remove(trie_key);
+            }
+            delayed_receipt_indices.first_index = next_first_index;
+            // add delayed receipts
+            let new_receipts = gen_receipts(rng, 10);
+            for receipt in new_receipts {
+                set(
+                    &mut trie_update,
+                    TrieKey::DelayedReceipt { index: delayed_receipt_indices.next_available_index },
+                    &receipt,
+                );
+                delayed_receipt_indices.next_available_index += 1;
+            }
+            println!(
+                "after: delayed receipt indices {} {}",
+                delayed_receipt_indices.first_index, delayed_receipt_indices.next_available_index
+            );
+            set(&mut trie_update, TrieKey::DelayedReceiptIndices, &delayed_receipt_indices);
+            trie_update.commit(StateChangeCause::Resharding);
+            let (trie_changes, state_changes) = trie_update.finalize().unwrap();
+            let (store_update, new_state_root) =
+                tries.apply_all(&trie_changes, ShardUId::single_shard()).unwrap();
+            store_update.commit().unwrap();
+            state_root = new_state_root;
+
+            // update split states
+            let trie_changes = tries
+                .apply_state_changes_to_split_states(
+                    &split_state_roots,
+                    StateChangesForSplitStates::from_raw_state_changes(
+                        &state_changes,
+                        removed_receipts,
+                    ),
+                    account_id_to_shard_id,
+                )
+                .unwrap();
+            split_state_roots = trie_changes
+                .iter()
+                .map(|(shard_uid, trie_changes)| {
+                    let (state_update, state_root) =
+                        tries.apply_all(trie_changes, *shard_uid).unwrap();
+                    state_update.commit().unwrap();
+                    (*shard_uid, state_root)
+                })
+                .collect();
+
+            compare_state_and_split_states(
+                &tries,
+                &state_root,
+                &split_state_roots,
+                account_id_to_shard_id,
+            );
+        }
+    }
+
+    #[test]
+    fn test_split_and_update_states() {
+        // build states
+        let mut rng = rand::thread_rng();
+        for _ in 0..20 {
+            test_split_and_update_state_impl(&mut rng);
+        }
+    }
+}
diff --git a/mock-enclave/src/skw-vm-store/src/trie/state_parts.rs b/mock-enclave/src/skw-vm-store/src/trie/state_parts.rs
new file mode 100644
index 0000000..649fd9a
--- /dev/null
+++ b/mock-enclave/src/skw-vm-store/src/trie/state_parts.rs
@@ -0,0 +1,726 @@
+use std::collections::HashMap;
+
+use near_primitives::challenge::PartialState;
+use near_primitives::hash::CryptoHash;
+use near_primitives::types::StateRoot;
+use tracing::error;
+
+use crate::trie::iterator::TrieTraversalItem;
+use crate::trie::nibble_slice::NibbleSlice;
+use crate::trie::{
+    ApplyStatePartResult, NodeHandle, RawTrieNodeWithSize, TrieNode, TrieNodeWithSize,
+};
+use crate::{PartialStorage, StorageError, Trie, TrieChanges, TrieIterator};
+use near_primitives::contract::ContractCode;
+use near_primitives::state_record::is_contract_code_key;
+
+impl Trie {
+    /// Computes the set of trie nodes for a state part.
+    ///
+    /// # Panics
+    /// storage must be a TrieCachingStorage
+    /// part_id must be in [0..num_parts)
+    ///
+    /// # Errors
+    /// StorageError if the storage is corrupted
+    pub fn get_trie_nodes_for_part(
+        &self,
+        part_id: u64,
+        num_parts: u64,
+        state_root: &StateRoot,
+    ) -> Result<PartialState, StorageError> {
+        assert!(part_id < num_parts);
+        assert!(self.storage.as_caching_storage().is_some());
+
+        let with_recording = self.recording_reads();
+        with_recording.visit_nodes_for_state_part(state_root, part_id, num_parts)?;
+        let recorded = with_recording.recorded_storage().unwrap();
+
+        let trie_nodes = recorded.nodes;
+        Ok(trie_nodes)
+    }
+
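Together with `validate_trie_nodes_for_part` and the apply path later in this file, this gives the state-sync round trip: the producer records the nodes covering one key range, and the consumer rejects proofs with missing or superfluous nodes before replaying them. A sketch with hypothetical `part_id`/`num_parts` values:

    // Producer side: collect the PartialState proof for one part.
    let part = trie.get_trie_nodes_for_part(part_id, num_parts, &state_root)?;
    // Consumer side: the proof must contain exactly the required node set.
    Trie::validate_trie_nodes_for_part(&state_root, part_id, num_parts, part)?;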
+ fn visit_nodes_for_state_part( + &self, + root_hash: &CryptoHash, + part_id: u64, + num_parts: u64, + ) -> Result<(), StorageError> { + let path_begin = self.find_path_for_part_boundary(root_hash, part_id, num_parts)?; + let path_end = self.find_path_for_part_boundary(root_hash, part_id + 1, num_parts)?; + let mut iterator = self.iter(root_hash)?; + iterator.visit_nodes_interval(&path_begin, &path_end)?; + + // Extra nodes for compatibility with the previous version of computing state parts + if part_id + 1 != num_parts { + let mut iterator = TrieIterator::new(self, root_hash)?; + let path_end_encoded = NibbleSlice::encode_nibbles(&path_end, false); + iterator.seek_nibble_slice(NibbleSlice::from_encoded(&path_end_encoded[..]).0)?; + if let Some(item) = iterator.next() { + item?; + } + } + + Ok(()) + } + + /// Part part_id has nodes with paths [ path(part_id) .. path(part_id + 1) ) + /// path is returned as nibbles, last path is vec![16], previous paths end in nodes + pub(crate) fn find_path_for_part_boundary( + &self, + state_root: &StateRoot, + part_id: u64, + num_parts: u64, + ) -> Result, StorageError> { + assert!(part_id <= num_parts); + if part_id == num_parts { + return Ok(vec![16]); + } + let root_node = self.retrieve_node(state_root)?; + let total_size = root_node.memory_usage; + let size_start = (total_size + num_parts - 1) / num_parts * part_id; + self.find_path(&root_node, size_start) + } + + fn find_child( + &self, + size_start: u64, + node: &mut TrieNodeWithSize, + size_skipped: &mut u64, + key_nibbles: &mut Vec, + ) -> Result { + let node_size = node.node.memory_usage_direct_no_memory(); + if *size_skipped + node_size <= size_start { + *size_skipped += node_size; + } else { + return Ok(false); + } + match &node.node { + TrieNode::Empty => Ok(false), + TrieNode::Leaf(key, _) => { + let (slice, _is_leaf) = NibbleSlice::from_encoded(key); + key_nibbles.extend(slice.iter()); + Ok(false) + } + TrieNode::Branch(children, _) => { + for child_index in 0..children.len() { + let child = match &children[child_index] { + None => { + continue; + } + Some(NodeHandle::InMemory(_)) => { + unreachable!("only possible while mutating") + } + Some(NodeHandle::Hash(h)) => self.retrieve_node(h)?, + }; + if *size_skipped + child.memory_usage <= size_start { + *size_skipped += child.memory_usage; + continue; + } else { + key_nibbles.push(child_index as u8); + *node = child; + return Ok(true); + } + } + // This line should not be reached if node.memory_usage > size_start + // To avoid changing production behavior, I'm just adding a debug_assert here + // to indicate that + debug_assert!( + false, + "This should not be reached target size {} node memory usage {} size skipped {}", + size_start, node.memory_usage, size_skipped, + ); + error!( + target: "state_parts", + "This should not be reached target size {} node memory usage {} size skipped {}", + size_start, node.memory_usage, size_skipped, + ); + key_nibbles.push(16u8); + Ok(false) + } + TrieNode::Extension(key, child_handle) => { + let child = match child_handle { + NodeHandle::InMemory(_) => unreachable!("only possible while mutating"), + NodeHandle::Hash(h) => self.retrieve_node(h)?, + }; + let (slice, _is_leaf) = NibbleSlice::from_encoded(key); + key_nibbles.extend(slice.iter()); + *node = child; + Ok(true) + } + } + } + + // find the first node so that including this node, the traversed size is larger than size + fn find_path(&self, root_node: &TrieNodeWithSize, size: u64) -> Result, StorageError> { + if root_node.memory_usage <= size { + 
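// The requested size offset lies at or past the end of the whole trie, so the
// boundary is the sentinel path vec![16]: 16 is one past the largest nibble
// value (15), i.e. a path that sorts after every real key.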
return Ok(vec![16u8]); + } + let mut key_nibbles: Vec = Vec::new(); + let mut node = root_node.clone(); + let mut size_skipped = 0u64; + while self.find_child(size, &mut node, &mut size_skipped, &mut key_nibbles)? {} + Ok(key_nibbles) + } + + /// Validate state part + /// + /// # Panics + /// part_id must be in [0..num_parts) + /// + /// # Errors + /// StorageError::TrieNodeWithMissing if some nodes are missing + pub fn validate_trie_nodes_for_part( + state_root: &StateRoot, + part_id: u64, + num_parts: u64, + trie_nodes: PartialState, + ) -> Result<(), StorageError> { + assert!(part_id < num_parts); + let num_nodes = trie_nodes.0.len(); + let trie = Trie::from_recorded_storage(PartialStorage { nodes: trie_nodes }); + + trie.visit_nodes_for_state_part(state_root, part_id, num_parts)?; + let storage = trie.storage.as_partial_storage().unwrap(); + + if storage.visited_nodes.borrow().len() != num_nodes { + // TODO #1603 not actually TrieNodeMissing. + // The error is that the proof has more nodes than needed. + return Err(StorageError::TrieNodeMissing); + } + Ok(()) + } + + fn apply_state_part_impl( + state_root: &StateRoot, + part_id: u64, + num_parts: u64, + part: Vec>, + ) -> Result { + if state_root == &CryptoHash::default() { + return Ok(ApplyStatePartResult { + trie_changes: TrieChanges::empty(CryptoHash::default()), + contract_codes: vec![], + }); + } + let trie = Trie::from_recorded_storage(PartialStorage { nodes: PartialState(part) }); + let path_begin = trie.find_path_for_part_boundary(state_root, part_id, num_parts)?; + let path_end = trie.find_path_for_part_boundary(state_root, part_id + 1, num_parts)?; + let mut iterator = TrieIterator::new(&trie, state_root)?; + let trie_traversal_items = iterator.visit_nodes_interval(&path_begin, &path_end)?; + let mut map = HashMap::new(); + let mut contract_codes = Vec::new(); + for TrieTraversalItem { hash, key } in trie_traversal_items { + let value = trie.retrieve_raw_bytes(&hash)?; + map.entry(hash).or_insert_with(|| (value.clone(), 0)).1 += 1; + if let Some(trie_key) = key { + if is_contract_code_key(&trie_key) { + contract_codes.push(ContractCode::new(value, None)); + } + } + } + let (insertions, deletions) = Trie::convert_to_insertions_and_deletions(map); + Ok(ApplyStatePartResult { + trie_changes: TrieChanges { + old_root: CryptoHash::default(), + new_root: *state_root, + insertions, + deletions, + }, + contract_codes, + }) + } + + /// Applies state part and returns the storage changes for the state part and all contract codes extracted from it. + /// Writing all storage changes gives the complete trie. 
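// Sketch of the expected caller flow, assuming `parts` were produced by
// get_trie_nodes_for_part above; the function name and error handling are
// illustrative, not a prescribed API:
//
//     fn restore_state(state_root: &StateRoot, parts: Vec<Vec<Vec<u8>>>) -> Vec<TrieChanges> {
//         let num_parts = parts.len() as u64;
//         parts.into_iter().enumerate().map(|(part_id, part)| {
//             // Reject malformed parts first; apply_state_part cannot fail afterwards.
//             Trie::validate_trie_nodes_for_part(
//                 state_root, part_id as u64, num_parts, PartialState(part.clone()),
//             ).expect("peer sent an invalid state part");
//             Trie::apply_state_part(state_root, part_id as u64, num_parts, part).trie_changes
//         }).collect()
//     }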
+ pub fn apply_state_part( + state_root: &StateRoot, + part_id: u64, + num_parts: u64, + part: Vec>, + ) -> ApplyStatePartResult { + Self::apply_state_part_impl(state_root, part_id, num_parts, part) + .expect("apply_state_part is guaranteed to succeed when each part is valid") + } + + pub fn get_memory_usage_from_serialized(bytes: &Vec) -> Result { + match RawTrieNodeWithSize::decode(bytes) { + Ok(value) => Ok(TrieNodeWithSize::from_raw(value).memory_usage), + Err(_) => { + Err(StorageError::StorageInconsistentState("Failed to decode node".to_string())) + } + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use rand::prelude::ThreadRng; + use rand::Rng; + + use near_primitives::hash::{hash, CryptoHash}; + + use crate::test_utils::{create_tries, gen_changes, test_populate_trie}; + use crate::trie::iterator::CrumbStatus; + use crate::trie::{TrieRefcountChange, ValueHandle}; + + use super::*; + use near_primitives::shard_layout::ShardUId; + + impl Trie { + /// Combines all parts and returns TrieChanges that can be applied to storage. + /// + /// # Input + /// parts[i] has trie nodes for part i + /// + /// # Errors + /// StorageError if data is inconsistent. Should never happen if each part was validated. + pub fn combine_state_parts_naive( + state_root: &StateRoot, + parts: &Vec>>, + ) -> Result { + let nodes = parts + .iter() + .map(|part| part.iter()) + .flatten() + .map(|data| data.to_vec()) + .collect::>(); + let trie = Trie::from_recorded_storage(PartialStorage { nodes: PartialState(nodes) }); + let mut insertions = , u32)>>::new(); + trie.traverse_all_nodes(state_root, |hash| { + if let Some((_bytes, rc)) = insertions.get_mut(hash) { + *rc += 1; + } else { + let bytes = trie.storage.retrieve_raw_bytes(hash)?; + insertions.insert(*hash, (bytes, 1)); + } + Ok(()) + })?; + let mut insertions = insertions + .into_iter() + .map(|(k, (v, rc))| TrieRefcountChange { + trie_node_or_value_hash: k, + trie_node_or_value: v, + rc, + }) + .collect::>(); + insertions.sort(); + Ok(TrieChanges { + old_root: Default::default(), + new_root: *state_root, + insertions, + deletions: vec![], + }) + } + + /// on_enter is applied for nodes as well as values + fn traverse_all_nodes Result<(), StorageError>>( + &self, + root: &CryptoHash, + mut on_enter: F, + ) -> Result<(), StorageError> { + if root == &CryptoHash::default() { + return Ok(()); + } + let mut stack: Vec<(CryptoHash, TrieNodeWithSize, CrumbStatus)> = Vec::new(); + let root_node = self.retrieve_node(root)?; + stack.push((*root, root_node, CrumbStatus::Entering)); + while let Some((hash, node, position)) = stack.pop() { + if let CrumbStatus::Entering = position { + on_enter(&hash)?; + } + match &node.node { + TrieNode::Empty => { + continue; + } + TrieNode::Leaf(_, value) => { + match value { + ValueHandle::HashAndSize(_, hash) => { + on_enter(hash)?; + } + ValueHandle::InMemory(_) => { + unreachable!("only possible while mutating") + } + } + continue; + } + TrieNode::Branch(children, value) => match position { + CrumbStatus::Entering => { + match value { + Some(ValueHandle::HashAndSize(_, hash)) => { + on_enter(hash)?; + } + _ => {} + } + stack.push((hash, node, CrumbStatus::AtChild(0))); + continue; + } + CrumbStatus::AtChild(mut i) => { + while i < 16 { + if let Some(NodeHandle::Hash(_h)) = children[i].as_ref() { + break; + } + i += 1; + } + if i < 16 { + if let Some(NodeHandle::Hash(h)) = children[i].clone() { + let child = self.retrieve_node(&h)?; + stack.push((hash, node, CrumbStatus::AtChild(i + 1))); + stack.push((h, 
child, CrumbStatus::Entering)); + } else { + stack.push((hash, node, CrumbStatus::Exiting)); + } + } else { + stack.push((hash, node, CrumbStatus::Exiting)); + } + } + CrumbStatus::Exiting => { + continue; + } + CrumbStatus::At => { + continue; + } + }, + TrieNode::Extension(_key, child) => { + if let CrumbStatus::Entering = position { + match child.clone() { + NodeHandle::InMemory(_) => { + unreachable!("only possible while mutating") + } + NodeHandle::Hash(h) => { + let child = self.retrieve_node(&h)?; + stack.push((hash, node, CrumbStatus::Exiting)); + stack.push((h, child, CrumbStatus::Entering)); + } + } + } + } + } + } + Ok(()) + } + + fn visit_nodes_for_size_range_old( + &self, + root_hash: &CryptoHash, + size_start: u64, + size_end: u64, + ) -> Result<(), StorageError> { + let root_node = self.retrieve_node(root_hash)?; + let path_begin = self.find_path(&root_node, size_start)?; + let path_end = self.find_path(&root_node, size_end)?; + + let mut iterator = TrieIterator::new(self, root_hash)?; + let path_begin_encoded = NibbleSlice::encode_nibbles(&path_begin, false); + iterator.seek_nibble_slice(NibbleSlice::from_encoded(&path_begin_encoded[..]).0)?; + loop { + match iterator.next() { + None => break, + Some(Err(e)) => { + return Err(e); + } + Some(Ok(_item)) => { + // The last iteration actually reads a value we don't need. + } + } + // TODO #1603 this is bad for large keys + if iterator.key_nibbles >= path_end { + break; + } + } + Ok(()) + } + + pub fn get_trie_nodes_for_part_old( + &self, + part_id: u64, + num_parts: u64, + state_root: &StateRoot, + ) -> Result { + assert!(part_id < num_parts); + assert!(self.storage.as_caching_storage().is_some()); + let root_node = self.retrieve_node(state_root)?; + let total_size = root_node.memory_usage; + let size_start = (total_size + num_parts - 1) / num_parts * part_id; + let size_end = + std::cmp::min((total_size + num_parts - 1) / num_parts * (part_id + 1), total_size); + + let with_recording = self.recording_reads(); + with_recording.visit_nodes_for_size_range_old(state_root, size_start, size_end)?; + let recorded = with_recording.recorded_storage().unwrap(); + + let trie_nodes = recorded.nodes; + + Ok(trie_nodes) + } + } + + #[test] + fn test_combine_empty_trie_parts() { + let state_root = StateRoot::default(); + let _ = Trie::combine_state_parts_naive(&state_root, &vec![]).unwrap(); + let _ = + Trie::validate_trie_nodes_for_part(&state_root, 0, 1, PartialState(vec![])).unwrap(); + let _ = Trie::apply_state_part(&state_root, 0, 1, vec![]); + } + + fn construct_trie_for_big_parts_1( + rng: &mut ThreadRng, + max_key_length: u64, + big_value_length: u64, + ) -> Vec<(Vec, Option>)> { + // Test #1: a long path where every node on the path has a large value + let mut trie_changes = Vec::new(); + for i in 0..max_key_length { + // ([255,255,..,255], big_value) + let key = (0..(i + 1)).map(|_| 255u8).collect::>(); + let value = (0..big_value_length).map(|_| rng.gen::()).collect::>(); + trie_changes.push((key, Some(value))); + } + trie_changes + } + + fn construct_trie_for_big_parts_2( + rng: &mut ThreadRng, + max_key_length: u64, + big_value_length: u64, + ) -> Vec<(Vec, Option>)> { + // Test #2: a long path where every node on the path has a large value, is a branch + // and each of its children is a branch. 
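// On top of the test #1 keys, every level of the long path gains
// 15 * 15 * 3 = 675 small values from the loops below, which is what lets a
// single part grow far beyond the average part size.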
+ let mut trie_changes = + construct_trie_for_big_parts_1(rng, max_key_length, big_value_length); + let small_value_length = 20; + for i in 0..max_key_length { + for x in 0u8..15u8 { + for y in 0u8..15u8 { + { + // ([255,255,..,255]xy, small_value) + // this means every 000..000 node is a branch and all of its children are branches + let mut key = (0..(i + 1)).map(|_| 255u8).collect::>(); + key.push(x * 16 + y); + let value = + (0..small_value_length).map(|_| rng.gen::()).collect::>(); + trie_changes.push((key, Some(value))); + } + { + let mut key = (0..i).map(|_| 255u8).collect::>(); + key.push(16 * 15 + x); + key.push(y * 16); + let value = + (0..small_value_length).map(|_| rng.gen::()).collect::>(); + trie_changes.push((key, Some(value))); + } + { + let mut key = (0..i).map(|_| 255u8).collect::>(); + key.push(16 * x + 15); + key.push(y); + let value = + (0..small_value_length).map(|_| rng.gen::()).collect::>(); + trie_changes.push((key, Some(value))); + } + } + } + } + trie_changes + } + + fn run_test_parts_not_huge(gen_trie_changes: F, big_value_length: u64) + where + F: FnOnce(&mut ThreadRng, u64, u64) -> Vec<(Vec, Option>)>, + { + let mut rng = rand::thread_rng(); + let max_key_length = 50u64; + let max_key_length_in_nibbles = max_key_length * 2; + let max_node_serialized_size = 32 * 16 + 100; // DEVNOTE nodes can be pretty big + let max_node_children = 16; + let max_part_overhead = + max_key_length_in_nibbles * max_node_serialized_size * max_node_children * 2 + + big_value_length * 2; + println!("Max allowed overhead: {}", max_part_overhead); + let trie_changes = gen_trie_changes(&mut rng, max_key_length, big_value_length); + println!("Number of nodes: {}", trie_changes.len()); + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let state_root = + test_populate_trie(&tries, &Trie::empty_root(), ShardUId::single_shard(), trie_changes); + let memory_size = trie.retrieve_root_node(&state_root).unwrap().memory_usage; + println!("Total memory size: {}", memory_size); + for num_parts in [2, 3, 5, 10, 50].iter().cloned() { + let approximate_size_per_part = memory_size / num_parts; + let parts = (0..num_parts) + .map(|part_id| { + trie.get_trie_nodes_for_part(part_id, num_parts, &state_root).unwrap().0 + }) + .collect::>(); + let part_nodecounts_vec = parts.iter().map(|nodes| nodes.len()).collect::>(); + let sizes_vec = parts + .iter() + .map(|nodes| nodes.iter().map(|node| node.len()).sum::()) + .collect::>(); + + println!("Node counts of parts: {:?}", part_nodecounts_vec); + println!("Sizes of parts: {:?}", sizes_vec); + println!("Max size we allow: {}", approximate_size_per_part + max_part_overhead); + for size in sizes_vec { + assert!((size as u64) < approximate_size_per_part + max_part_overhead); + } + } + } + + #[test] + fn test_parts_not_huge_1() { + run_test_parts_not_huge(construct_trie_for_big_parts_1, 100_000); + } + + #[test] + fn test_parts_not_huge_2() { + run_test_parts_not_huge(construct_trie_for_big_parts_2, 100_000); + } + + fn merge_trie_changes(changes: Vec) -> TrieChanges { + if changes.is_empty() { + return TrieChanges::empty(CryptoHash::default()); + } + let new_root = changes[0].new_root; + let mut map = HashMap::new(); + for changes_set in changes { + assert!(changes_set.deletions.is_empty(), "state parts only have insertions"); + for TrieRefcountChange { trie_node_or_value_hash, trie_node_or_value, rc } in + changes_set.insertions + { + map.entry(trie_node_or_value_hash).or_insert_with(|| (trie_node_or_value, 0)).1 += + 
rc as i32; + } + for TrieRefcountChange { trie_node_or_value_hash, trie_node_or_value, rc } in + changes_set.deletions + { + map.entry(trie_node_or_value_hash).or_insert_with(|| (trie_node_or_value, 0)).1 -= + rc as i32; + } + } + let (insertions, deletions) = Trie::convert_to_insertions_and_deletions(map); + TrieChanges { old_root: Default::default(), new_root, insertions, deletions } + } + + #[test] + fn test_combine_state_parts() { + let mut rng = rand::thread_rng(); + for _ in 0..2000 { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let trie_changes = gen_changes(&mut rng, 20); + let state_root = test_populate_trie( + &tries, + &Trie::empty_root(), + ShardUId::single_shard(), + trie_changes.clone(), + ); + let root_memory_usage = trie.retrieve_root_node(&state_root).unwrap().memory_usage; + + { + // Test that combining all parts gets all nodes + let num_parts = rng.gen_range(2, 10); + let parts = (0..num_parts) + .map(|part_id| { + trie.get_trie_nodes_for_part(part_id, num_parts, &state_root).unwrap().0 + }) + .collect::>(); + + let trie_changes = check_combine_state_parts(&state_root, num_parts, &parts); + + let mut nodes = >>::new(); + let sizes_vec = parts + .iter() + .map(|nodes| nodes.iter().map(|node| node.len()).sum::()) + .collect::>(); + + for part in parts { + for node in part { + nodes.insert(hash(&node), node); + } + } + let all_nodes = nodes.into_iter().map(|(_hash, node)| node).collect::>(); + assert_eq!(all_nodes.len(), trie_changes.insertions.len()); + let size_of_all = all_nodes.iter().map(|node| node.len()).sum::(); + let num_nodes = all_nodes.len(); + Trie::validate_trie_nodes_for_part(&state_root, 0, 1, PartialState(all_nodes)) + .expect("validate ok"); + + let sum_of_sizes = sizes_vec.iter().sum::(); + // Manually check that sizes are reasonable + println!("------------------------------"); + println!("Number of nodes: {:?}", num_nodes); + println!("Sizes of parts: {:?}", sizes_vec); + println!( + "All nodes size: {:?}, sum_of_sizes: {:?}, memory_usage: {:?}", + size_of_all, sum_of_sizes, root_memory_usage + ); + // borsh serialize should be about this size + assert!(size_of_all + 8 * num_nodes <= root_memory_usage as usize); + } + } + } + + fn check_combine_state_parts( + state_root: &CryptoHash, + num_parts: u64, + parts: &Vec>>, + ) -> TrieChanges { + let trie_changes = Trie::combine_state_parts_naive(state_root, parts).unwrap(); + + let trie_changes_new = { + let changes = (0..num_parts) + .map(|part_id| { + Trie::apply_state_part( + state_root, + part_id, + num_parts, + parts[part_id as usize].clone(), + ) + .trie_changes + }) + .collect::>(); + merge_trie_changes(changes) + }; + assert_eq!(trie_changes, trie_changes_new); + trie_changes + } + + #[test] + fn test_get_trie_nodes_for_part() { + let mut rng = rand::thread_rng(); + for _ in 0..20 { + let tries = create_tries(); + let trie = tries.get_trie_for_shard(ShardUId::single_shard()); + let trie_changes = gen_changes(&mut rng, 10); + + let state_root = test_populate_trie( + &tries, + &Trie::empty_root(), + ShardUId::single_shard(), + trie_changes.clone(), + ); + for _ in 0..10 { + // Test that creating and validating are consistent + let num_parts = rng.gen_range(1, 10); + let part_id = rng.gen_range(0, num_parts); + let trie_nodes = + trie.get_trie_nodes_for_part(part_id, num_parts, &state_root).unwrap(); + let trie_nodes2 = + trie.get_trie_nodes_for_part_old(part_id, num_parts, &state_root).unwrap(); + assert_eq!(trie_nodes, trie_nodes2); + 
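// The old and new selection paths must agree byte-for-byte, and the produced
// part must also validate against the same root and part bounds.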
Trie::validate_trie_nodes_for_part(&state_root, part_id, num_parts, trie_nodes) + .expect("validate ok"); + } + } + } +} diff --git a/mock-enclave/src/skw-vm-store/src/trie/trie_storage.rs b/mock-enclave/src/skw-vm-store/src/trie/trie_storage.rs new file mode 100644 index 0000000..f3df781 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/trie/trie_storage.rs @@ -0,0 +1,210 @@ +use std::collections::{HashMap, HashSet}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; + +use near_primitives::hash::CryptoHash; + +use crate::db::refcount::decode_value_with_rc; +use crate::trie::POISONED_LOCK_ERR; +use crate::{ColState, StorageError, Store}; +use lru::LruCache; +use near_primitives::shard_layout::ShardUId; +use std::cell::RefCell; +use std::io::ErrorKind; + +#[derive(Clone)] +pub struct TrieCache(Arc>>>); + +impl TrieCache { + pub fn new() -> Self { + Self(Arc::new(Mutex::new(LruCache::new(TRIE_MAX_CACHE_SIZE)))) + } + + pub fn clear(&self) { + self.0.lock().expect(POISONED_LOCK_ERR).clear() + } + + pub fn update_cache(&self, ops: Vec<(CryptoHash, Option<&Vec>)>) { + let mut guard = self.0.lock().expect(POISONED_LOCK_ERR); + for (hash, opt_value_rc) in ops { + if let Some(value_rc) = opt_value_rc { + if let (Some(value), _rc) = decode_value_with_rc(&value_rc) { + if value.len() < TRIE_LIMIT_CACHED_VALUE_SIZE { + guard.put(hash, value.to_vec()); + } + } else { + guard.pop(&hash); + } + } else { + guard.pop(&hash); + } + } + } +} + +pub trait TrieStorage { + /// Get bytes of a serialized TrieNode. + /// # Errors + /// StorageError if the storage fails internally or the hash is not present. + fn retrieve_raw_bytes(&self, hash: &CryptoHash) -> Result, StorageError>; + + fn as_caching_storage(&self) -> Option<&TrieCachingStorage> { + None + } + + fn as_recording_storage(&self) -> Option<&TrieRecordingStorage> { + None + } + + fn as_partial_storage(&self) -> Option<&TrieMemoryPartialStorage> { + None + } +} + +/// Records every value read by retrieve_raw_bytes. +/// Used for obtaining state parts (and challenges in the future). +pub struct TrieRecordingStorage { + pub(crate) store: Arc, + pub(crate) shard_uid: ShardUId, + pub(crate) recorded: RefCell>>, +} + +impl TrieStorage for TrieRecordingStorage { + fn retrieve_raw_bytes(&self, hash: &CryptoHash) -> Result, StorageError> { + if let Some(val) = self.recorded.borrow().get(hash) { + return Ok(val.clone()); + } + let key = TrieCachingStorage::get_key_from_shard_uid_and_hash(self.shard_uid, hash); + let val = self + .store + .get(ColState, key.as_ref()) + .map_err(|_| StorageError::StorageInternalError)?; + if let Some(val) = val { + self.recorded.borrow_mut().insert(*hash, val.clone()); + Ok(val) + } else { + Err(StorageError::StorageInconsistentState("Trie node missing".to_string())) + } + } + + fn as_recording_storage(&self) -> Option<&TrieRecordingStorage> { + Some(self) + } +} + +/// Storage for validating recorded partial storage. +/// visited_nodes are to validate that partial storage doesn't contain unnecessary nodes. 
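// The recording and partial storages form a record-and-replay pair. A sketch,
// assuming the Trie helpers recording_reads / recorded_storage /
// from_recorded_storage behave as used elsewhere in this crate:
//
//     fn record_and_replay(trie: &Trie, root: &CryptoHash, key: &[u8]) {
//         let recording = trie.recording_reads();            // TrieRecordingStorage
//         let value = recording.get(root, key).unwrap();
//         let partial = recording.recorded_storage().unwrap();
//         let replay = Trie::from_recorded_storage(partial); // TrieMemoryPartialStorage
//         // The replayed read sees exactly the recorded nodes and nothing else.
//         assert_eq!(replay.get(root, key).unwrap(), value);
//     }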
+pub struct TrieMemoryPartialStorage { + pub(crate) recorded_storage: HashMap>, + pub(crate) visited_nodes: RefCell>, +} + +impl TrieStorage for TrieMemoryPartialStorage { + fn retrieve_raw_bytes(&self, hash: &CryptoHash) -> Result, StorageError> { + let result = self + .recorded_storage + .get(hash) + .map_or_else(|| Err(StorageError::TrieNodeMissing), |val| Ok(val.clone())); + if result.is_ok() { + self.visited_nodes.borrow_mut().insert(*hash); + } + result + } + + fn as_partial_storage(&self) -> Option<&TrieMemoryPartialStorage> { + Some(self) + } +} + +/// Maximum number of cache entries. +/// It was chosen to fit into RAM well. RAM spend on trie cache should not exceed +/// 50_000 * 4 (number of shards) * TRIE_LIMIT_CACHED_VALUE_SIZE = 800 MB. +/// In our tests on a single shard, it barely occupied 40 MB, which is dominated by state cache size +/// with 512 MB limit. The total RAM usage for a single shard was 1 GB. +#[cfg(not(feature = "no_cache"))] +const TRIE_MAX_CACHE_SIZE: usize = 50000; + +#[cfg(feature = "no_cache")] +const TRIE_MAX_CACHE_SIZE: usize = 1; + +/// Values above this size (in bytes) are never cached. +/// Note that Trie inner nodes are always smaller than this. +const TRIE_LIMIT_CACHED_VALUE_SIZE: usize = 4000; + +pub struct TrieCachingStorage { + pub(crate) store: Arc, + pub(crate) cache: TrieCache, + pub(crate) shard_uid: ShardUId, +} + +impl TrieCachingStorage { + pub fn new(store: Arc, cache: TrieCache, shard_uid: ShardUId) -> TrieCachingStorage { + TrieCachingStorage { store, cache, shard_uid } + } + + pub(crate) fn get_shard_uid_and_hash_from_key( + key: &[u8], + ) -> Result<(ShardUId, CryptoHash), std::io::Error> { + if key.len() != 40 { + return Err(std::io::Error::new(ErrorKind::Other, "Key is always shard_uid + hash")); + } + let id = ShardUId::try_from(&key[..8]).unwrap(); + let hash = CryptoHash::try_from(&key[8..]).unwrap(); + Ok((id, hash)) + } + + pub(crate) fn get_key_from_shard_uid_and_hash( + shard_uid: ShardUId, + hash: &CryptoHash, + ) -> [u8; 40] { + let mut key = [0; 40]; + key[0..8].copy_from_slice(&shard_uid.to_bytes()); + key[8..].copy_from_slice(hash.as_ref()); + key + } +} + +impl TrieStorage for TrieCachingStorage { + fn retrieve_raw_bytes(&self, hash: &CryptoHash) -> Result, StorageError> { + let mut guard = self.cache.0.lock().expect(POISONED_LOCK_ERR); + if let Some(val) = guard.pop(hash) { + Ok(val.clone()) + } else { + let key = Self::get_key_from_shard_uid_and_hash(self.shard_uid, hash); + let val = self + .store + .get(ColState, key.as_ref()) + .map_err(|_| StorageError::StorageInternalError)?; + if let Some(val) = val { + if val.len() < TRIE_LIMIT_CACHED_VALUE_SIZE { + guard.put(*hash, val.clone()); + } + Ok(val) + } else { + // not StorageError::TrieNodeMissing because it's only for TrieMemoryPartialStorage + Err(StorageError::StorageInconsistentState("Trie node missing".to_string())) + } + } + } + + fn as_caching_storage(&self) -> Option<&TrieCachingStorage> { + Some(self) + } +} + +/// Runtime counts the number of touched trie nodes for the purpose of gas calculation. 
+/// Trie increments it on every call to TrieStorage::retrieve_raw_bytes() +#[derive(Default)] +pub struct TouchedNodesCounter { + counter: AtomicU64, +} + +impl TouchedNodesCounter { + pub fn increment(&self) { + self.counter.fetch_add(1, Ordering::SeqCst); + } + + pub fn get(&self) -> u64 { + self.counter.load(Ordering::SeqCst) + } +} diff --git a/mock-enclave/src/skw-vm-store/src/trie/trie_tests.rs b/mock-enclave/src/skw-vm-store/src/trie/trie_tests.rs new file mode 100644 index 0000000..289cb64 --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/trie/trie_tests.rs @@ -0,0 +1,128 @@ +use crate::test_utils::{create_tries_complex, gen_changes, simplify_changes, test_populate_trie}; +use crate::trie::trie_storage::{TrieMemoryPartialStorage, TrieStorage}; +use crate::{PartialStorage, Trie, TrieUpdate}; +use near_primitives::errors::StorageError; +use near_primitives::hash::{hash, CryptoHash}; +use near_primitives::shard_layout::ShardUId; +use rand::seq::SliceRandom; +use rand::Rng; +use std::cell::RefCell; +use std::collections::{HashMap, HashSet}; +use std::fmt::Debug; +use std::rc::Rc; + +/// TrieMemoryPartialStorage, but contains only the first n requested nodes. +pub struct IncompletePartialStorage { + pub(crate) recorded_storage: HashMap>, + pub(crate) visited_nodes: RefCell>, + pub node_count_to_fail_after: usize, +} + +impl IncompletePartialStorage { + pub fn new(partial_storage: PartialStorage, nodes_count_to_fail_at: usize) -> Self { + let recorded_storage = + partial_storage.nodes.0.into_iter().map(|value| (hash(&value), value)).collect(); + Self { + recorded_storage, + visited_nodes: Default::default(), + node_count_to_fail_after: nodes_count_to_fail_at, + } + } +} + +impl TrieStorage for IncompletePartialStorage { + fn retrieve_raw_bytes(&self, hash: &CryptoHash) -> Result, StorageError> { + let result = self + .recorded_storage + .get(hash) + .map_or_else(|| Err(StorageError::TrieNodeMissing), |val| Ok(val.clone())); + + if result.is_ok() { + self.visited_nodes.borrow_mut().insert(*hash); + } + + if self.visited_nodes.borrow().len() > self.node_count_to_fail_after { + Err(StorageError::TrieNodeMissing) + } else { + result + } + } + + fn as_partial_storage(&self) -> Option<&TrieMemoryPartialStorage> { + // Make sure it's not called - it pretends to be PartialStorage but is not + unimplemented!() + } +} + +fn setup_storage(trie: Rc, test: &mut F) -> (PartialStorage, Out) +where + F: FnMut(Rc) -> Result, + Out: PartialEq + Debug, +{ + let recording_trie = Rc::new(trie.recording_reads()); + let output = test(Rc::clone(&recording_trie)).expect("should not fail"); + (recording_trie.recorded_storage().unwrap(), output) +} + +fn test_incomplete_storage(trie: Rc, mut test: F) +where + F: FnMut(Rc) -> Result, + Out: PartialEq + Debug, +{ + let (storage, expected) = setup_storage(Rc::clone(&trie), &mut test); + let size = storage.nodes.0.len(); + print!("Test touches {} nodes, expected result {:?}...", size, expected); + for i in 0..(size + 1) { + let storage = IncompletePartialStorage::new(storage.clone(), i); + let trie = Trie { storage: Box::new(storage), counter: Default::default() }; + let expected_result = + if i < size { Err(&StorageError::TrieNodeMissing) } else { Ok(&expected) }; + assert_eq!(test(Rc::new(trie)).as_ref(), expected_result); + } + println!("Success"); +} + +#[test] +fn test_reads_with_incomplete_storage() { + let mut rng = rand::thread_rng(); + for _ in 0..50 { + let tries = create_tries_complex(1, 2); + let shard_uid = ShardUId { version: 1, shard_id: 0 }; + let 
trie = tries.get_trie_for_shard(shard_uid); + let trie = Rc::new(trie); + let mut state_root = Trie::empty_root(); + let trie_changes = gen_changes(&mut rng, 20); + let trie_changes = simplify_changes(&trie_changes); + if trie_changes.is_empty() { + continue; + } + state_root = test_populate_trie(&tries, &state_root, shard_uid, trie_changes.clone()); + + { + let (key, _) = trie_changes.choose(&mut rng).unwrap(); + println!("Testing lookup {:?}", key); + let lookup_test = + |trie: Rc| -> Result<_, StorageError> { trie.get(&state_root, key) }; + test_incomplete_storage(Rc::clone(&trie), lookup_test); + } + { + println!("Testing TrieIterator over whole trie"); + let trie_records = |trie: Rc| -> Result<_, StorageError> { + let iterator = trie.iter(&state_root)?; + iterator.collect::, _>>() + }; + test_incomplete_storage(Rc::clone(&trie), trie_records); + } + { + let (key, _) = trie_changes.choose(&mut rng).unwrap(); + let key_prefix = &key[0..rng.gen_range(0, key.len() + 1)]; + println!("Testing TrieUpdateIterator over prefix {:?}", key_prefix); + let trie_update_keys = |trie: Rc| -> Result<_, StorageError> { + let trie_update = TrieUpdate::new(trie, state_root); + let keys = trie_update.iter(key_prefix)?.collect::, _>>()?; + Ok(keys) + }; + test_incomplete_storage(Rc::clone(&trie), trie_update_keys); + } + } +} diff --git a/mock-enclave/src/skw-vm-store/src/trie/update.rs b/mock-enclave/src/skw-vm-store/src/trie/update.rs new file mode 100644 index 0000000..200135b --- /dev/null +++ b/mock-enclave/src/skw-vm-store/src/trie/update.rs @@ -0,0 +1,511 @@ +use std::collections::BTreeMap; +use std::iter::Peekable; + +use near_primitives::hash::CryptoHash; +use near_primitives::types::{ + RawStateChange, RawStateChanges, RawStateChangesWithTrieKey, StateChangeCause, +}; + +use crate::trie::TrieChanges; +use crate::StorageError; + +use super::{Trie, TrieIterator}; +use near_primitives::trie_key::TrieKey; +use std::rc::Rc; + +/// Key-value update. Contains a TrieKey and a value. +pub struct TrieKeyValueUpdate { + pub trie_key: TrieKey, + pub value: Option>, +} + +/// key that was updated -> the update. +pub type TrieUpdates = BTreeMap, TrieKeyValueUpdate>; + +/// Provides a way to access Storage and record changes with future commit. +pub struct TrieUpdate { + pub trie: Rc, + root: CryptoHash, + committed: RawStateChanges, + prospective: TrieUpdates, +} + +pub enum TrieUpdateValuePtr<'a> { + HashAndSize(&'a Trie, u32, CryptoHash), + MemoryRef(&'a Vec), +} + +impl<'a> TrieUpdateValuePtr<'a> { + pub fn len(&self) -> u32 { + match self { + TrieUpdateValuePtr::MemoryRef(value) => value.len() as u32, + TrieUpdateValuePtr::HashAndSize(_, length, _) => *length, + } + } + + pub fn deref_value(&self) -> Result, StorageError> { + match self { + TrieUpdateValuePtr::MemoryRef(value) => Ok((*value).clone()), + TrieUpdateValuePtr::HashAndSize(trie, _, hash) => trie.retrieve_raw_bytes(hash), + } + } +} + +impl TrieUpdate { + pub fn new(trie: Rc, root: CryptoHash) -> Self { + TrieUpdate { trie, root, committed: Default::default(), prospective: Default::default() } + } + + pub fn trie(&self) -> &Trie { + self.trie.as_ref() + } + + pub fn get(&self, key: &TrieKey) -> Result>, StorageError> { + let key = key.to_vec(); + if let Some(key_value) = self.prospective.get(&key) { + return Ok(key_value.value.as_ref().map(>::clone)); + } else if let Some(changes_with_trie_key) = self.committed.get(&key) { + if let Some(RawStateChange { data, .. 
}) = changes_with_trie_key.changes.last() { + return Ok(data.as_ref().map(>::clone)); + } + } + + self.trie.get(&self.root, &key) + } + + pub fn get_ref(&self, key: &TrieKey) -> Result>, StorageError> { + let key = key.to_vec(); + if let Some(key_value) = self.prospective.get(&key) { + return Ok(key_value.value.as_ref().map(TrieUpdateValuePtr::MemoryRef)); + } else if let Some(changes_with_trie_key) = self.committed.get(&key) { + if let Some(RawStateChange { data, .. }) = changes_with_trie_key.changes.last() { + return Ok(data.as_ref().map(TrieUpdateValuePtr::MemoryRef)); + } + } + self.trie.get_ref(&self.root, &key).map(|option| { + option.map(|(length, hash)| TrieUpdateValuePtr::HashAndSize(&self.trie, length, hash)) + }) + } + + pub fn set(&mut self, trie_key: TrieKey, value: Vec) { + // NOTE: Converting `TrieKey` to a `Vec` is useful here for 2 reasons: + // - Using `Vec` for sorting `BTreeMap` in the same order as a `Trie` and + // avoid recomputing `Vec` every time. It helps for merging iterators. + // - Using `TrieKey` later for `RawStateChangesWithTrieKey` for State changes RPCs. + self.prospective + .insert(trie_key.to_vec(), TrieKeyValueUpdate { trie_key, value: Some(value) }); + } + pub fn remove(&mut self, trie_key: TrieKey) { + self.prospective.insert(trie_key.to_vec(), TrieKeyValueUpdate { trie_key, value: None }); + } + + pub fn commit(&mut self, event: StateChangeCause) { + let prospective = std::mem::take(&mut self.prospective); + for (raw_key, TrieKeyValueUpdate { trie_key, value }) in prospective.into_iter() { + self.committed + .entry(raw_key) + .or_insert_with(|| RawStateChangesWithTrieKey { trie_key, changes: Vec::new() }) + .changes + .push(RawStateChange { cause: event.clone(), data: value }); + } + } + + pub fn rollback(&mut self) { + self.prospective.clear(); + } + + pub fn finalize(self) -> Result<(TrieChanges, Vec), StorageError> { + assert!(self.prospective.is_empty(), "Finalize cannot be called with uncommitted changes."); + let TrieUpdate { trie, root, committed, .. } = self; + let mut state_changes = Vec::with_capacity(committed.len()); + let trie_changes = trie.update( + &root, + committed.into_iter().map(|(k, changes_with_trie_key)| { + let data = changes_with_trie_key + .changes + .last() + .expect("Committed entry should have at least one change") + .data + .clone(); + state_changes.push(changes_with_trie_key); + (k, data) + }), + )?; + Ok((trie_changes, state_changes)) + } + + pub fn finalize_genesis(self) -> Result { + assert!(self.prospective.is_empty(), "Finalize cannot be called with uncommitted changes."); + let TrieUpdate { trie, root, committed, .. 
} = self; + let trie_changes = trie.update( + &root, + committed.into_iter().map(|(k, changes_with_trie_key)| { + let data = changes_with_trie_key + .changes + .into_iter() + .last() + .expect("Committed entry should have at least one change") + .data; + (k, data) + }), + )?; + Ok(trie_changes) + } + + /// Returns Error if the underlying storage fails + pub fn iter(&self, key_prefix: &[u8]) -> Result, StorageError> { + TrieUpdateIterator::new(self, key_prefix, b"", None) + } + + pub fn range( + &self, + prefix: &[u8], + start: &[u8], + end: &[u8], + ) -> Result, StorageError> { + TrieUpdateIterator::new(self, prefix, start, Some(end)) + } + + pub fn get_root(&self) -> CryptoHash { + self.root + } +} + +struct MergeIter<'a> { + left: Peekable, &'a Option>)> + 'a>>, + right: Peekable, &'a Option>)> + 'a>>, +} + +impl<'a> Iterator for MergeIter<'a> { + type Item = (&'a Vec, &'a Option>); + + fn next(&mut self) -> Option { + let res = match (self.left.peek(), self.right.peek()) { + (Some(&(ref left_key, _)), Some(&(ref right_key, _))) => left_key.cmp(right_key), + (Some(_), None) => std::cmp::Ordering::Less, + (None, Some(_)) => std::cmp::Ordering::Greater, + (None, None) => return None, + }; + + // Check which elements comes first and only advance the corresponding iterator. + // If two keys are equal, take the value from `right`. + match res { + std::cmp::Ordering::Less => self.left.next(), + std::cmp::Ordering::Greater => self.right.next(), + std::cmp::Ordering::Equal => { + self.left.next(); + self.right.next() + } + } + } +} + +pub struct TrieUpdateIterator<'a> { + prefix: Vec, + end_offset: Option>, + trie_iter: Peekable>, + overlay_iter: Peekable>, +} + +impl<'a> TrieUpdateIterator<'a> { + #![allow(clippy::new_ret_no_self)] + pub fn new( + state_update: &'a TrieUpdate, + prefix: &[u8], + start: &[u8], + end: Option<&[u8]>, + ) -> Result { + let mut trie_iter = state_update.trie.iter(&state_update.root)?; + let mut start_offset = prefix.to_vec(); + start_offset.extend_from_slice(start); + let end_offset = match end { + Some(end) => { + let mut p = prefix.to_vec(); + p.extend_from_slice(end); + Some(p) + } + None => None, + }; + trie_iter.seek(&start_offset)?; + let committed_iter = state_update.committed.range(start_offset.clone()..).map( + |(raw_key, changes_with_trie_key)| { + ( + raw_key, + &changes_with_trie_key + .changes + .last() + .as_ref() + .expect("Committed entry should have at least one change.") + .data, + ) + }, + ); + let prospective_iter = state_update + .prospective + .range(start_offset..) + .map(|(raw_key, key_value)| (raw_key, &key_value.value)); + let overlay_iter = MergeIter { + left: (Box::new(committed_iter) as Box>).peekable(), + right: (Box::new(prospective_iter) as Box>).peekable(), + } + .peekable(); + Ok(TrieUpdateIterator { + prefix: prefix.to_vec(), + end_offset, + trie_iter: trie_iter.peekable(), + overlay_iter, + }) + } +} + +impl<'a> Iterator for TrieUpdateIterator<'a> { + type Item = Result, StorageError>; + + fn next(&mut self) -> Option { + let stop_cond = |key: &Vec, prefix: &Vec, end_offset: &Option>| { + !key.starts_with(prefix) + || match end_offset { + Some(end) => key >= end, + None => false, + } + }; + enum Ordering { + Trie, + Overlay, + Both, + } + // Usually one iteration, unless need to skip None values in prospective / committed. 
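// Three outcomes below: advance only the trie iterator, only the overlay
// iterator, or both when the keys match, in which case the overlay value
// wins. Overlay entries holding None are deletions, so the loop skips them
// rather than yielding the key.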
+ loop { + let res = { + match (self.trie_iter.peek(), self.overlay_iter.peek()) { + (Some(&Ok((ref left_key, _))), Some(&(ref right_key, _))) => { + match ( + stop_cond(left_key, &self.prefix, &self.end_offset), + stop_cond(*right_key, &self.prefix, &self.end_offset), + ) { + (false, false) => { + if left_key < *right_key { + Ordering::Trie + } else if &left_key == right_key { + Ordering::Both + } else { + Ordering::Overlay + } + } + (false, true) => Ordering::Trie, + (true, false) => Ordering::Overlay, + (true, true) => { + return None; + } + } + } + (Some(&Ok((ref left_key, _))), None) => { + if stop_cond(left_key, &self.prefix, &self.end_offset) { + return None; + } + Ordering::Trie + } + (None, Some(&(right_key, _))) => { + if stop_cond(right_key, &self.prefix, &self.end_offset) { + return None; + } + Ordering::Overlay + } + (None, None) => return None, + (Some(&Err(ref e)), _) => return Some(Err(e.clone())), + } + }; + + // Check which elements comes first and only advance the corresponding iterator. + // If two keys are equal, take the value from `right`. + return match res { + Ordering::Trie => match self.trie_iter.next() { + Some(Ok((key, _value))) => Some(Ok(key)), + _ => None, + }, + Ordering::Overlay => match self.overlay_iter.next() { + Some((key, Some(_))) => Some(Ok(key.clone())), + Some((_, None)) => continue, + None => None, + }, + Ordering::Both => { + self.trie_iter.next(); + match self.overlay_iter.next() { + Some((key, Some(_))) => Some(Ok(key.clone())), + Some((_, None)) => continue, + None => None, + } + } + }; + } + } +} + +#[cfg(test)] +mod tests { + use crate::test_utils::{create_tries, create_tries_complex}; + + use super::*; + use crate::ShardUId; + const SHARD_VERSION: u32 = 1; + const COMPLEX_SHARD_UID: ShardUId = ShardUId { version: SHARD_VERSION, shard_id: 0 }; + + fn test_key(key: Vec) -> TrieKey { + TrieKey::ContractData { account_id: "alice".parse().unwrap(), key } + } + + #[test] + fn trie() { + let tries = create_tries_complex(SHARD_VERSION, 2); + let root = CryptoHash::default(); + let mut trie_update = tries.new_trie_update(COMPLEX_SHARD_UID, root); + trie_update.set(test_key(b"dog".to_vec()), b"puppy".to_vec()); + trie_update.set(test_key(b"dog2".to_vec()), b"puppy".to_vec()); + trie_update.set(test_key(b"xxx".to_vec()), b"puppy".to_vec()); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + let trie_changes = trie_update.finalize().unwrap().0; + let (store_update, new_root) = tries.apply_all(&trie_changes, COMPLEX_SHARD_UID).unwrap(); + store_update.commit().unwrap(); + let trie_update2 = tries.new_trie_update(COMPLEX_SHARD_UID, new_root); + assert_eq!(trie_update2.get(&test_key(b"dog".to_vec())), Ok(Some(b"puppy".to_vec()))); + let values = trie_update2 + .iter(&test_key(b"dog".to_vec()).to_vec()) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!( + values, + vec![test_key(b"dog".to_vec()).to_vec(), test_key(b"dog2".to_vec()).to_vec()] + ); + } + + #[test] + fn trie_remove() { + let tries = create_tries_complex(SHARD_VERSION, 2); + + // Delete non-existing element. 
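// Deleting a key that was never written must leave the root at
// CryptoHash::default(), the empty-trie root, as asserted below.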
+ let mut trie_update = tries.new_trie_update(COMPLEX_SHARD_UID, CryptoHash::default()); + trie_update.remove(test_key(b"dog".to_vec())); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + let trie_changes = trie_update.finalize().unwrap().0; + let (store_update, new_root) = tries.apply_all(&trie_changes, COMPLEX_SHARD_UID).unwrap(); + store_update.commit().unwrap(); + assert_eq!(new_root, CryptoHash::default()); + + // Add and right away delete element. + let mut trie_update = tries.new_trie_update(COMPLEX_SHARD_UID, CryptoHash::default()); + trie_update.set(test_key(b"dog".to_vec()), b"puppy".to_vec()); + trie_update.remove(test_key(b"dog".to_vec())); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + let trie_changes = trie_update.finalize().unwrap().0; + let (store_update, new_root) = tries.apply_all(&trie_changes, COMPLEX_SHARD_UID).unwrap(); + store_update.commit().unwrap(); + assert_eq!(new_root, CryptoHash::default()); + + // Add, apply changes and then delete element. + let mut trie_update = tries.new_trie_update(COMPLEX_SHARD_UID, CryptoHash::default()); + trie_update.set(test_key(b"dog".to_vec()), b"puppy".to_vec()); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + let trie_changes = trie_update.finalize().unwrap().0; + let (store_update, new_root) = tries.apply_all(&trie_changes, COMPLEX_SHARD_UID).unwrap(); + store_update.commit().unwrap(); + assert_ne!(new_root, CryptoHash::default()); + let mut trie_update = tries.new_trie_update(COMPLEX_SHARD_UID, new_root); + trie_update.remove(test_key(b"dog".to_vec())); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + let trie_changes = trie_update.finalize().unwrap().0; + let (store_update, new_root) = tries.apply_all(&trie_changes, COMPLEX_SHARD_UID).unwrap(); + store_update.commit().unwrap(); + assert_eq!(new_root, CryptoHash::default()); + } + + #[test] + fn trie_iter() { + let tries = create_tries(); + let mut trie_update = + tries.new_trie_update(ShardUId::single_shard(), CryptoHash::default()); + trie_update.set(test_key(b"dog".to_vec()), b"puppy".to_vec()); + trie_update.set(test_key(b"aaa".to_vec()), b"puppy".to_vec()); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + let trie_changes = trie_update.finalize().unwrap().0; + let (store_update, new_root) = + tries.apply_all(&trie_changes, ShardUId::single_shard()).unwrap(); + store_update.commit().unwrap(); + + let mut trie_update = tries.new_trie_update(ShardUId::single_shard(), new_root); + trie_update.set(test_key(b"dog2".to_vec()), b"puppy".to_vec()); + trie_update.set(test_key(b"xxx".to_vec()), b"puppy".to_vec()); + + let values: Result>, _> = + trie_update.iter(&test_key(b"dog".to_vec()).to_vec()).unwrap().collect(); + assert_eq!( + values.unwrap(), + vec![test_key(b"dog".to_vec()).to_vec(), test_key(b"dog2".to_vec()).to_vec()] + ); + + trie_update.rollback(); + + let values: Result>, _> = + trie_update.iter(&test_key(b"dog".to_vec()).to_vec()).unwrap().collect(); + assert_eq!(values.unwrap(), vec![test_key(b"dog".to_vec()).to_vec()]); + + let mut trie_update = tries.new_trie_update(ShardUId::single_shard(), new_root); + trie_update.remove(test_key(b"dog".to_vec())); + + let values: Result>, _> = + trie_update.iter(&test_key(b"dog".to_vec()).to_vec()).unwrap().collect(); + assert_eq!(values.unwrap().len(), 0); + + 
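// The remaining cases interleave committed and prospective changes to check
// that the merged iterator shadows the stored trie correctly.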
let mut trie_update = tries.new_trie_update(ShardUId::single_shard(), new_root); + trie_update.set(test_key(b"dog2".to_vec()), b"puppy".to_vec()); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + trie_update.remove(test_key(b"dog2".to_vec())); + + let values: Result>, _> = + trie_update.iter(&test_key(b"dog".to_vec()).to_vec()).unwrap().collect(); + assert_eq!(values.unwrap(), vec![test_key(b"dog".to_vec()).to_vec()]); + + let mut trie_update = tries.new_trie_update(ShardUId::single_shard(), new_root); + trie_update.set(test_key(b"dog2".to_vec()), b"puppy".to_vec()); + trie_update + .commit(StateChangeCause::TransactionProcessing { tx_hash: CryptoHash::default() }); + trie_update.set(test_key(b"dog3".to_vec()), b"puppy".to_vec()); + + let values: Result>, _> = + trie_update.iter(&test_key(b"dog".to_vec()).to_vec()).unwrap().collect(); + assert_eq!( + values.unwrap(), + vec![ + test_key(b"dog".to_vec()).to_vec(), + test_key(b"dog2".to_vec()).to_vec(), + test_key(b"dog3".to_vec()).to_vec() + ] + ); + + let values: Result>, _> = + trie_update.range(&test_key(b"do".to_vec()).to_vec(), b"g", b"g21").unwrap().collect(); + assert_eq!( + values.unwrap(), + vec![test_key(b"dog".to_vec()).to_vec(), test_key(b"dog2".to_vec()).to_vec(),] + ); + + let values: Result>, _> = + trie_update.range(&test_key(b"do".to_vec()).to_vec(), b"", b"xyz").unwrap().collect(); + + assert_eq!( + values.unwrap(), + vec![ + test_key(b"dog".to_vec()).to_vec(), + test_key(b"dog2".to_vec()).to_vec(), + test_key(b"dog3".to_vec()).to_vec() + ] + ); + } +} diff --git a/skw-contract-sdk/examples/lockable-fungible-token/Cargo.lock b/skw-contract-sdk/examples/lockable-fungible-token/Cargo.lock index 7a4cec9..18dd88d 100644 --- a/skw-contract-sdk/examples/lockable-fungible-token/Cargo.lock +++ b/skw-contract-sdk/examples/lockable-fungible-token/Cargo.lock @@ -38,6 +38,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + [[package]] name = "base64" version = "0.13.0" @@ -798,7 +804,7 @@ checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" name = "skw-contract-sdk" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "borsh", "bs58", "serde", @@ -828,7 +834,7 @@ version = "0.1.0" name = "skw-vm-host" version = "0.0.0" dependencies = [ - "base64", + "base64 0.13.0", "bs58", "byteorder", "near-crypto", @@ -844,12 +850,28 @@ dependencies = [ name = "skw-vm-primitives" version = "0.1.0" dependencies = [ + "base64 0.11.0", + "borsh", + "bs58", + "near-crypto", "num-rational 0.3.2", "serde", "sha2", + "smart-default", "wasmi", ] +[[package]] +name = "smart-default" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "133659a15339456eeeb07572eb02a91c91e9815e9cbc89566944d2c8d3efdbf6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "spin" version = "0.9.2" diff --git a/skw-contract-sdk/examples/status-message/Cargo.lock b/skw-contract-sdk/examples/status-message/Cargo.lock index 38279a7..e26c9f9 100644 --- a/skw-contract-sdk/examples/status-message/Cargo.lock +++ b/skw-contract-sdk/examples/status-message/Cargo.lock @@ -38,6 +38,12 @@ version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + [[package]] name = "base64" version = "0.13.0" @@ -791,7 +797,7 @@ checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" name = "skw-contract-sdk" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "borsh", "bs58", "serde", @@ -821,7 +827,7 @@ version = "0.1.0" name = "skw-vm-host" version = "0.0.0" dependencies = [ - "base64", + "base64 0.13.0", "bs58", "byteorder", "near-crypto", @@ -837,12 +843,28 @@ dependencies = [ name = "skw-vm-primitives" version = "0.1.0" dependencies = [ + "base64 0.11.0", + "borsh", + "bs58", + "near-crypto", "num-rational 0.3.2", "serde", "sha2", + "smart-default", "wasmi", ] +[[package]] +name = "smart-default" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "133659a15339456eeeb07572eb02a91c91e9815e9cbc89566944d2c8d3efdbf6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "spin" version = "0.9.2" diff --git a/skw-contract-sdk/skw-contract-sdk/src/environment/env.rs b/skw-contract-sdk/skw-contract-sdk/src/environment/env.rs index 9fc4e53..5f8b87b 100644 --- a/skw-contract-sdk/skw-contract-sdk/src/environment/env.rs +++ b/skw-contract-sdk/skw-contract-sdk/src/environment/env.rs @@ -177,11 +177,6 @@ pub fn block_timestamp() -> u64 { unsafe { sys::block_timestamp() } } -/// Current epoch height. -pub fn epoch_height() -> u64 { - unsafe { sys::epoch_height() } -} - /// Current total storage usage of this smart contract that this account would be paying for. pub fn storage_usage() -> StorageUsage { unsafe { sys::storage_usage() } @@ -412,10 +407,6 @@ pub fn promise_batch_then(promise_index: PromiseIndex, account_id: &AccountId) - } } -pub fn promise_batch_action_create_account(promise_index: PromiseIndex) { - unsafe { sys::promise_batch_action_create_account(promise_index) } -} - pub fn promise_batch_action_deploy_contract(promise_index: u64, code: &[u8]) { unsafe { sys::promise_batch_action_deploy_contract( @@ -446,24 +437,6 @@ pub fn promise_batch_action_function_call( } } -pub fn promise_batch_action_transfer(promise_index: PromiseIndex, amount: Balance) { - unsafe { sys::promise_batch_action_transfer(promise_index, &amount as *const Balance as _) } -} - -pub fn promise_batch_action_delete_account( - promise_index: PromiseIndex, - beneficiary_id: &AccountId, -) { - let beneficiary_id: &str = beneficiary_id.as_ref(); - unsafe { - sys::promise_batch_action_delete_account( - promise_index, - beneficiary_id.len() as _, - beneficiary_id.as_ptr() as _, - ) - } -} - /// If the current function is invoked by a callback we can access the execution results of the /// promises that caused the callback. This function returns the number of complete and /// incomplete callbacks. 
diff --git a/skw-contract-sdk/skw-contract-sdk/src/environment/mock/external.rs b/skw-contract-sdk/skw-contract-sdk/src/environment/mock/external.rs index 6dc9d41..9fb017d 100644 --- a/skw-contract-sdk/skw-contract-sdk/src/environment/mock/external.rs +++ b/skw-contract-sdk/skw-contract-sdk/src/environment/mock/external.rs @@ -79,15 +79,6 @@ impl External for SdkExternal { Ok(res) } - fn append_action_create_account(&mut self, receipt_index: u64) -> Result<()> { - self.receipts - .get_mut(receipt_index as usize) - .unwrap() - .actions - .push(VmAction::CreateAccount); - Ok(()) - } - fn append_action_deploy_contract(&mut self, receipt_index: u64, code: Vec) -> Result<()> { self.receipts .get_mut(receipt_index as usize) @@ -117,93 +108,7 @@ impl External for SdkExternal { ); Ok(()) } - - fn append_action_transfer(&mut self, receipt_index: u64, amount: u128) -> Result<()> { - self.receipts - .get_mut(receipt_index as usize) - .unwrap() - .actions - .push(VmAction::Transfer { deposit: amount }); - Ok(()) - } - - // fn append_action_stake( - // &mut self, - // receipt_index: u64, - // stake: u128, - // public_key: Vec, - // ) -> Result<()> { - // let public_key = PublicKey::try_from(public_key).unwrap(); - // self.receipts - // .get_mut(receipt_index as usize) - // .unwrap() - // .actions - // .push(VmAction::Stake { stake, public_key }); - // Ok(()) - // } - - // fn append_action_add_key_with_full_access( - // &mut self, - // receipt_index: u64, - // public_key: Vec, - // nonce: u64, - // ) -> Result<()> { - // let public_key = PublicKey::try_from(public_key).unwrap(); - // self.receipts - // .get_mut(receipt_index as usize) - // .unwrap() - // .actions - // .push(VmAction::AddKeyWithFullAccess { public_key, nonce }); - // Ok(()) - // } - - // fn append_action_add_key_with_function_call( - // &mut self, - // receipt_index: u64, - // public_key: Vec, - // nonce: u64, - // allowance: Option, - // receiver_id: VmAccountId, - // function_names: Vec>, - // ) -> Result<()> { - // let public_key = PublicKey::try_from(public_key).unwrap(); - // let function_names = - // function_names.into_iter().map(|s| String::from_utf8(s).unwrap()).collect(); - // self.receipts.get_mut(receipt_index as usize).unwrap().actions.push( - // VmAction::AddKeyWithFunctionCall { - // public_key, - // nonce, - // allowance, - // receiver_id: receiver_id.into(), - // function_names, - // }, - // ); - // Ok(()) - // } - - // fn append_action_delete_key(&mut self, receipt_index: u64, public_key: Vec) -> Result<()> { - // let public_key = PublicKey::try_from(public_key).unwrap(); - // self.receipts - // .get_mut(receipt_index as usize) - // .unwrap() - // .actions - // .push(VmAction::DeleteKey { public_key }); - // Ok(()) - // } - - fn append_action_delete_account( - &mut self, - receipt_index: u64, - beneficiary_id: VmAccountId, - ) -> Result<()> { - self.receipts - .get_mut(receipt_index as usize) - .ok_or(HostError::InvalidReceiptIndex { receipt_index })? 
- .actions - .push(VmAction::DeleteAccount { beneficiary_id: beneficiary_id.into() }); - Ok(()) - } - + fn get_touched_nodes_count(&self) -> u64 { 0 } diff --git a/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mocked_blockchain.rs b/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mocked_blockchain.rs index 478e76c..312c040 100644 --- a/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mocked_blockchain.rs +++ b/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mocked_blockchain.rs @@ -146,10 +146,6 @@ mod mock_chain { with_mock_interface(|b| b.block_timestamp()) } #[no_mangle] - extern "C" fn epoch_height() -> u64 { - with_mock_interface(|b| b.epoch_height()) - } - #[no_mangle] extern "C" fn storage_usage() -> u64 { with_mock_interface(|b| b.storage_usage()) } @@ -292,10 +288,6 @@ mod mock_chain { with_mock_interface(|b| b.promise_batch_then(promise_index, account_id_len, account_id_ptr)) } #[no_mangle] - extern "C" fn promise_batch_action_create_account(promise_index: u64) { - with_mock_interface(|b| b.promise_batch_action_create_account(promise_index)) - } - #[no_mangle] extern "C" fn promise_batch_action_deploy_contract( promise_index: u64, code_len: u64, @@ -328,24 +320,6 @@ mod mock_chain { }) } #[no_mangle] - extern "C" fn promise_batch_action_transfer(promise_index: u64, amount_ptr: u64) { - with_mock_interface(|b| b.promise_batch_action_transfer(promise_index, amount_ptr)) - } - #[no_mangle] - extern "C" fn promise_batch_action_delete_account( - promise_index: u64, - beneficiary_id_len: u64, - beneficiary_id_ptr: u64, - ) { - with_mock_interface(|b| { - b.promise_batch_action_delete_account( - promise_index, - beneficiary_id_len, - beneficiary_id_ptr, - ) - }) - } - #[no_mangle] extern "C" fn promise_results_count() -> u64 { with_mock_interface(|b| b.promise_results_count()) } diff --git a/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mod.rs b/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mod.rs index 2757bbe..457606d 100644 --- a/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mod.rs +++ b/skw-contract-sdk/skw-contract-sdk/src/environment/mock/mod.rs @@ -7,7 +7,7 @@ pub use self::mocked_blockchain::MockedBlockchain; pub use self::receipt::{Receipt, VmAction}; use crate::AccountId; use core::cell::RefCell; -use skw_vm_primitives::errors::ParseAccountError; +use skw_vm_primitives::account_id::ParseAccountError; thread_local! { /// Low-level blockchain interface wrapped by the environment. 
     /// Low-level blockchain interface wrapped by the environment. Prefer using `env::*` and
diff --git a/skw-contract-sdk/skw-contract-sdk/src/promise.rs b/skw-contract-sdk/skw-contract-sdk/src/promise.rs
index 1b740d2..b52f8dc 100644
--- a/skw-contract-sdk/skw-contract-sdk/src/promise.rs
+++ b/skw-contract-sdk/skw-contract-sdk/src/promise.rs
@@ -7,7 +7,6 @@ use std::rc::Rc;
 use crate::{AccountId, Balance, Gas, PromiseIndex};
 
 enum PromiseAction {
-    CreateAccount,
     DeployContract {
         code: Vec<u8>,
     },
@@ -17,19 +16,12 @@ enum PromiseAction {
         amount: Balance,
         gas: Gas,
     },
-    Transfer {
-        amount: Balance,
-    },
-    DeleteAccount {
-        beneficiary_id: AccountId,
-    },
 }
 
 impl PromiseAction {
     pub fn add(&self, promise_index: PromiseIndex) {
         use PromiseAction::*;
         match self {
-            CreateAccount => crate::env::promise_batch_action_create_account(promise_index),
             DeployContract { code } => {
                 crate::env::promise_batch_action_deploy_contract(promise_index, code)
             }
@@ -42,12 +34,6 @@ impl PromiseAction {
                     *gas,
                 )
             }
-            Transfer { amount } => {
-                crate::env::promise_batch_action_transfer(promise_index, *amount)
-            }
-            DeleteAccount { beneficiary_id } => {
-                crate::env::promise_batch_action_delete_account(promise_index, beneficiary_id)
-            }
         }
     }
 }
@@ -133,14 +119,6 @@ impl PromiseJoint {
-/// * When they need to create a transaction with one or many actions, e.g. the following code
-/// schedules a transaction that creates an account, transfers tokens:
-///
-/// ```no_run
-/// # use skw_contract_sdk::{Promise, env, test_utils::VMContextBuilder, testing_env};
-/// # testing_env!(VMContextBuilder::new().signer_account_id("bob.sk".parse().unwrap())
-/// #     .account_balance(1000).prepaid_gas(1_000_000.into()).build());
-/// Promise::new("bob.sk".parse().unwrap())
-///     .create_account()
-///     .transfer(1000);
-/// ```
+/// * When they need to create a transaction with one or many actions, e.g. deploying a
+/// contract to an account and then calling one of its methods.
 #[derive(Clone)]
 pub struct Promise {
     subtype: PromiseSubtype,
@@ -190,11 +168,6 @@ impl Promise {
         self
     }
 
-    /// Create account on which this promise acts.
-    pub fn create_account(self) -> Self {
-        self.add_action(PromiseAction::CreateAccount)
-    }
-
     /// Deploy a smart contract to the account on which this promise acts.
     pub fn deploy_contract(self, code: Vec<u8>) -> Self {
         self.add_action(PromiseAction::DeployContract { code })
@@ -211,29 +184,12 @@ impl Promise {
         self.add_action(PromiseAction::FunctionCall { function_name, arguments, amount, gas })
     }
 
-    /// Transfer tokens to the account that this promise acts on.
-    pub fn transfer(self, amount: Balance) -> Self {
-        self.add_action(PromiseAction::Transfer { amount })
-    }
-
-    /// Delete the given account.
-    pub fn delete_account(self, beneficiary_id: AccountId) -> Self {
-        self.add_action(PromiseAction::DeleteAccount { beneficiary_id })
-    }
-
     /// Merge this promise with another promise, so that we can schedule execution of another
     /// smart contract right after all merged promises finish.
     ///
-    /// Note, once the promises are merged it is not possible to add actions to them, e.g. the
-    /// following code will panic during the execution of the smart contract:
-    ///
-    /// ```no_run
-    /// # use skw_contract_sdk::{Promise, testing_env};
-    /// let p1 = Promise::new("bob.sk".parse().unwrap()).create_account();
-    /// let p2 = Promise::new("carol.sk".parse().unwrap()).create_account();
-    /// let p3 = p1.and(p2);
-    /// // p3.create_account();
-    /// ```
+    /// Note, once the promises are merged it is not possible to add actions to them; doing so
+    /// will panic during the execution of the smart contract.
     pub fn and(self, other: Promise) -> Promise {
         Promise {
             subtype: PromiseSubtype::Joint(Rc::new(PromiseJoint {
@@ -250,14 +206,6 @@ impl Promise {
-    /// In the following code `bob_near` and `dave_near` will be created concurrently. `carol_near`
-    /// creation will wait for `bob_near` to be created, and `eva_near` will wait for both `carol_near`
-    /// and `dave_near` to be created first.
-    /// ```no_run
-    /// # use skw_contract_sdk::{Promise, VMContext, testing_env};
-    /// let p1 = Promise::new("bob_near".parse().unwrap()).create_account();
-    /// let p2 = Promise::new("carol_near".parse().unwrap()).create_account();
-    /// let p3 = Promise::new("dave_near".parse().unwrap()).create_account();
-    /// let p4 = Promise::new("eva_near".parse().unwrap()).create_account();
-    /// p1.then(p2).and(p3).then(p4);
-    /// ```
+    /// For example, in `p1.then(p2).and(p3).then(p4)`, `p2` runs only after `p1` completes,
+    /// and `p4` runs only after both `p2` and `p3` complete.
     pub fn then(self, mut other: Promise) -> Promise {
         match &mut other.subtype {
             PromiseSubtype::Single(x) => *x.after.borrow_mut() = Some(self),
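Since both `no_run` doc examples above were dropped along with `create_account` and `transfer`, the `Promise` docs are left without a worked example. A minimal sketch of one restricted to the surviving actions could look like the following; the account ids, wasm bytes, and gas figure are illustrative, and the `u64`-to-`Gas` conversion is assumed from the `prepaid_gas(1_000_000.into())` pattern in the removed example:

```rust
use skw_contract_sdk::Promise;

// Deploy and initialise `a.sk`, ping `b.sk` in parallel, then schedule a
// callback on `a.sk` once both promises resolve.
fn cross_contract_flow(code: Vec<u8>) {
    let init = Promise::new("a.sk".parse().unwrap())
        .deploy_contract(code)
        .function_call("init".to_string(), b"{}".to_vec(), 0, 5_000_000_000_000u64.into());
    let ping = Promise::new("b.sk".parse().unwrap())
        .function_call("ping".to_string(), vec![], 0, 5_000_000_000_000u64.into());
    // `and` joins the two promises; `then` runs only after both resolve.
    init.and(ping).then(
        Promise::new("a.sk".parse().unwrap())
            .function_call("finish".to_string(), vec![], 0, 5_000_000_000_000u64.into()),
    );
}
```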
diff --git a/skw-contract-sdk/skw-contract-sdk/src/test_utils/context.rs b/skw-contract-sdk/skw-contract-sdk/src/test_utils/context.rs
index 21fa7ac..37ffa28 100644
--- a/skw-contract-sdk/skw-contract-sdk/src/test_utils/context.rs
+++ b/skw-contract-sdk/skw-contract-sdk/src/test_utils/context.rs
@@ -2,7 +2,7 @@ use crate::mock::MockedBlockchain;
 use crate::test_utils::test_env::*;
 use crate::AccountId;
 use crate::{
-    Balance, BlockNumber, EpochHeight, Gas, PromiseResult, PublicKey, StorageUsage, VMContext,
+    Balance, BlockNumber, Gas, PromiseResult, PublicKey, StorageUsage, VMContext,
 };
 use skw_vm_primitives::fees::RuntimeFeesConfig;
 use skw_vm_host::{VMConfig, ViewConfig};
@@ -39,7 +39,6 @@ impl VMContextBuilder {
                 input: vec![],
                 block_number: 0,
                 block_timestamp: 0,
-                epoch_height: 0,
                 account_balance: 10u128.pow(26),
                 storage_usage: 1024 * 300,
                 attached_deposit: 0,
@@ -81,11 +80,6 @@ impl VMContextBuilder {
         self
     }
 
-    pub fn epoch_height(&mut self, epoch_height: EpochHeight) -> &mut Self {
-        self.context.epoch_height = epoch_height;
-        self
-    }
-
     pub fn account_balance(&mut self, amount: Balance) -> &mut Self {
         self.context.account_balance = amount;
         self
diff --git a/skw-contract-sdk/skw-sdk-sim/Cargo.toml b/skw-contract-sdk/skw-sdk-sim/Cargo.toml
index d1c4ec7..9bafd26 100644
--- a/skw-contract-sdk/skw-sdk-sim/Cargo.toml
+++ b/skw-contract-sdk/skw-sdk-sim/Cargo.toml
@@ -2,7 +2,7 @@
 name = "skw-sdk-sim"
 version = '0.1.0'
 authors = ['SkyeKiwi ', "Near Inc "]
-edition = '2021'
+edition = '2018'
 homepage = 'https://skye.kiwi'
 repository = 'https://github.com/skyekiwi/skyekiwi-network'
 license = 'GPL-3.0'
@@ -11,16 +11,15 @@ description = """
 NEAR Simulator & cross-contract testing library
 """
 
-
 [dependencies]
 skw-contract-sdk = { path = "../skw-contract-sdk" }
+skw-vm-host = { path = "../../mock-enclave/src/skw-vm-host" }
+
 near-crypto = "=0.1.0"
-near-primitives = "=0.1.0-pre.1"
-near-vm-logic = "=4.0.0-pre.1"
+near-primitives = "=0.1.0-pre.1"
 near-pool = "=0.1.0-pre.1"
 near-store = "=0.1.0-pre.1"
-near-runtime = "=4.0.0-pre.1"
 
 lazy-static-include = "3"
 
 # Temporary workaround see https://github.com/bitvecto-rs/bitvec/issues/105
@@ -33,6 +32,5 @@ fungible-token = { path="../examples/fungible-token/ft" }
 
 [features]
 default = []
-no_cache = ["near-store/no_cache", "near-runtime/no_cache"]
 no_sim = []
 no_contract_cache = []
diff --git a/skw-contract-sdk/skw-sdk-sim/src/lib.rs b/skw-contract-sdk/skw-sdk-sim/src/lib.rs
index eee2f9b..dc72272 100644
--- a/skw-contract-sdk/skw-sdk-sim/src/lib.rs
+++ b/skw-contract-sdk/skw-sdk-sim/src/lib.rs
@@ -1,6 +1,6 @@
-//! # near_sdk_sim
+//! # skw_sdk_sim
 //!
-//! This crate provides an interface for simulating transactions on NEAR's Blockchain.
+//! This crate provides an interface for simulating transactions on the SkyeKiwi offchain VM,
+//! a port of NEAR's blockchain runtime.
 //! The simulator uses a standalone runtime that can handle any of the [actions](https://nomicon.io/RuntimeSpec/Actions.html) provided by the
 //! real runtime, including: creating accounts, deploying contracts, making contract calls and
 //! calling view methods.
@@ -14,7 +14,7 @@ pub mod units;
 pub mod user;
 pub use near_crypto;
 #[doc(hidden)]
-pub use near_primitives::*;
+pub use skw_vm_primitives::*;
 #[doc(inline)]
 pub use units::*;
 #[doc(inline)]
diff --git a/skw-contract-sdk/skw-sdk-sim/src/runtime.rs b/skw-contract-sdk/skw-sdk-sim/src/runtime.rs
index 1a0dc3e..3fbc036 100644
--- a/skw-contract-sdk/skw-sdk-sim/src/runtime.rs
+++ b/skw-contract-sdk/skw-sdk-sim/src/runtime.rs
@@ -5,6 +5,7 @@ use crate::cache::{cache_to_arc, create_cache, ContractCache};
 use crate::ViewResult;
 use near_crypto::{InMemorySigner, KeyType, PublicKey, Signer};
 use near_pool::{types::PoolIterator, TransactionPool};
+
 use near_primitives::account::{AccessKey, Account};
 use near_primitives::errors::RuntimeError;
 use near_primitives::hash::CryptoHash;
@@ -21,13 +22,13 @@ use near_primitives::types::{
 };
 use near_primitives::version::PROTOCOL_VERSION;
 use near_primitives::views::ViewApplyState;
+
 use near_runtime::{state_viewer::TrieViewer, ApplyState, Runtime};
 use near_sdk::{AccountId, Duration};
 use near_store::{
     get_access_key, get_account, set_account, test_utils::create_test_store, ShardTries, Store,
 };
 
-const DEFAULT_EPOCH_LENGTH: u64 = 3;
 const DEFAULT_BLOCK_PROD_TIME: Duration = 1_000_000_000;
 
 pub fn init_runtime(
@@ -47,7 +48,6 @@ pub struct GenesisConfig {
     pub gas_price: Balance,
     pub gas_limit: Gas,
     pub genesis_height: u64,
-    pub epoch_length: u64,
     pub block_prod_time: Duration,
     pub runtime_config: RuntimeConfig,
     pub state_records: Vec<StateRecord>,
@@ -67,7 +67,6 @@ impl Default for GenesisConfig {
             gas_price: 100_000_000,
             gas_limit: runtime_config.wasm_config.limit_config.max_total_prepaid_gas,
             genesis_height: 0,
-            epoch_length: DEFAULT_EPOCH_LENGTH,
             block_prod_time: DEFAULT_BLOCK_PROD_TIME,
             runtime_config,
             state_records: vec![],
@@ -98,7 +97,6 @@ pub struct Block {
     prev_block: Option<Arc<Block>>,
     state_root: CryptoHash,
-    pub epoch_height: EpochHeight,
     pub block_height: BlockHeight,
     pub block_timestamp: u64,
     pub gas_price: Balance,
@@ -122,7 +120,6 @@ impl Block {
             prev_block: None,
             state_root: CryptoHash::default(),
             block_height: genesis_config.genesis_height,
-            epoch_height: 0,
             block_timestamp: genesis_config.genesis_time,
             gas_price: genesis_config.gas_price,
             gas_limit: genesis_config.gas_limit,
@@ -132,7 +129,6 @@ impl Block {
     fn produce(
         &self,
         new_state_root: CryptoHash,
-        epoch_length: u64,
         block_prod_time: Duration,
     ) -> Block {
         Self {
@@ -142,56 +138,88 @@ impl Block {
             prev_block: Some(Arc::new(self.clone())),
             state_root: new_state_root,
             block_height: self.block_height + 1,
-            epoch_height: (self.block_height + 1) / epoch_length,
         }
     }
 }
 
 pub struct RuntimeStandalone {
     pub genesis: GenesisConfig,
+
+    // TODO: port this
     tx_pool: TransactionPool,
     transactions: HashMap<CryptoHash, SignedTransaction>,
     outcomes: HashMap<CryptoHash, ExecutionOutcome>,
     profile: HashMap<CryptoHash, ProfileData>,
     pub cur_block: Block,
+
+    // TODO: port this
     runtime: Runtime,
+
+    // TODO: port this / simplify this / sim this
     tries: ShardTries,
+
     pending_receipts: Vec<Receipt>,
-    epoch_info_provider: Box<dyn EpochInfoProvider>,
     pub last_outcomes: Vec<CryptoHash>,
+
     cache: ContractCache,
 }
 
 impl RuntimeStandalone {
     pub fn new(genesis: GenesisConfig, store: Arc<Store>) -> Self {
         let mut genesis_block = Block::genesis(&genesis);
+
+        // STORE:: here store_update
         let mut store_update = store.store_update();
+
+        // RUNTIME: runtime::new()
         let runtime = Runtime::new();
+
+        // TRIES:: ShardTries::new
+        // TRIES:: initialize from the passed in store
         let tries = ShardTries::new(store, 1);
+
+        // RUNTIME:: apply_genesis_state
+        // pub fn apply_genesis_state(
+        //     &self,
+        //     tries: ShardTries,
+        //     shard_id: ShardId,
+        //     validators: &[(AccountId, PublicKey, Balance)],
+        //     genesis: &Genesis,
+        //     config: &RuntimeConfig,
+        //     shard_account_ids: HashSet<AccountId>,
+        // ) -> StateRoot {
+        //     GenesisStateApplier::apply(tries, shard_id, validators, config, genesis, shard_account_ids)
+        // }
+        // Q: shouldn't there just be a state_root??
         let (s_update, state_root) = runtime.apply_genesis_state(
-            tries.clone(),
-            0,
-            &[],
-            &genesis.state_records,
-            &genesis.runtime_config,
+            tries.clone(),              // ShardTries
+            0,                          // can remove ShardId
+            &[],                        // remove Validators
+            &genesis.state_records,     // Genesis
+            &genesis.runtime_config,    // RuntimeConfig
         );
+
+        // Get rid of these
         store_update.merge(s_update);
         store_update.commit().unwrap();
+
         genesis_block.state_root = state_root;
+
+        // get rid of these as well ...
         let validators = genesis.validators.clone();
+
         Self {
-            genesis,
-            tries,
+            genesis,    // GenesisConfig
+            tries,      // ShardTries
             runtime,
+
+            // Q: what does this do? Don't we have a txpool??
             transactions: HashMap::new(),
             outcomes: HashMap::new(),
             profile: HashMap::new(),
             cur_block: genesis_block,
             tx_pool: TransactionPool::new(),
             pending_receipts: vec![],
-            epoch_info_provider: Box::new(MockEpochInfoProvider::new(
-                validators.into_iter().map(|info| (info.account_id, info.amount)),
-            )),
             cache: create_cache(),
             last_outcomes: vec![],
         }
@@ -276,6 +304,17 @@ impl RuntimeStandalone {
             block_hash: Default::default(),
         };
 
+        // pub fn apply(
+        //     &self,
+        //     trie: Trie,
+        //     root: CryptoHash,
+        //     validator_accounts_update: &Option<ValidatorAccountsUpdate>,
+        //     apply_state: &ApplyState,
+        //     incoming_receipts: &[Receipt],
+        //     transactions: &[SignedTransaction],
+        // ) -> Result<ApplyResult, RuntimeError> {
+
+        // RUNTIME:: runtime.apply seems to be the critical method for importing states
         let apply_result = self.runtime.apply(
             self.tries.get_trie_for_shard(0),
             self.cur_block.state_root,
             &None,
             &apply_state,
             &self.pending_receipts,
             &Self::prepare_transactions(&mut self.tx_pool),
-            self.epoch_info_provider.as_ref(),
         )?;
         self.pending_receipts = apply_result.outgoing_receipts;
 
         apply_result.outcomes.iter().for_each(|outcome| {
             self.last_outcomes.push(outcome.id);
             self.outcomes.insert(outcome.id, outcome.outcome.clone());
             self.profile.insert(outcome.id, profile_data.clone());
         });
+
         let (update, _) =
             self.tries.apply_all(&apply_result.trie_changes, 0).expect("Unexpected Storage error");
         update.commit().expect("Unexpected io error");
+
         self.cur_block = self.cur_block.produce(
             apply_result.state_root,
-            self.genesis.epoch_length,
             self.genesis.block_prod_time,
         );
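To keep the porting questions above answerable, here is a self-contained sketch of the control flow that `new`/`produce_block` implement, with every near-* type replaced by a local stand-in; nothing below is the real near-store or near-runtime API:

```rust
// Stand-in types only; they mirror the shape of the calls in the hunk above.
#[derive(Clone, Default)]
struct Block {
    state_root: u64, // stands in for CryptoHash
    height: u64,
    timestamp: u64,
}

struct ApplyResult {
    state_root: u64,
    outgoing_receipts: Vec<String>,
}

struct Runtime;

impl Runtime {
    // Stand-in for runtime.apply: consume pending receipts and pooled
    // transactions against the current root, yield a new root plus the
    // receipts the executed transactions emitted.
    fn apply(&self, root: u64, receipts: &[String], txs: &[String]) -> ApplyResult {
        ApplyResult {
            state_root: root + receipts.len() as u64 + txs.len() as u64,
            outgoing_receipts: txs.iter().map(|t| format!("receipt-of-{}", t)).collect(),
        }
    }
}

struct Chain {
    runtime: Runtime,
    cur_block: Block,
    pending_receipts: Vec<String>,
    tx_pool: Vec<String>,
}

impl Chain {
    const BLOCK_PROD_TIME: u64 = 1_000_000_000;

    fn produce_block(&mut self) {
        let txs = std::mem::take(&mut self.tx_pool);
        let result = self.runtime.apply(self.cur_block.state_root, &self.pending_receipts, &txs);
        // Receipts emitted by this block are consumed by the next one.
        self.pending_receipts = result.outgoing_receipts;
        // With epochs gone, a block only advances height, timestamp, and root.
        self.cur_block = Block {
            state_root: result.state_root,
            height: self.cur_block.height + 1,
            timestamp: self.cur_block.timestamp + Self::BLOCK_PROD_TIME,
        };
    }
}

fn main() {
    let mut chain = Chain {
        runtime: Runtime,
        cur_block: Block::default(),
        pending_receipts: vec![],
        tx_pool: vec!["tx-1".to_string()],
    };
    chain.produce_block();
    assert_eq!(chain.cur_block.height, 1);
    assert_eq!(chain.pending_receipts.len(), 1); // consumed by the next block
}
```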
@@ -321,16 +361,16 @@
         Ok(())
     }
 
-    /// Force alter account and change state_root.
-    pub fn force_account_update(&mut self, account_id: AccountId, account: &Account) {
-        let mut trie_update = self.tries.new_trie_update(0, self.cur_block.state_root);
-        set_account(&mut trie_update, String::from(account_id), account);
-        trie_update.commit(StateChangeCause::ValidatorAccountsUpdate);
-        let (trie_changes, _) = trie_update.finalize().expect("Unexpected Storage error");
-        let (store_update, new_root) = self.tries.apply_all(&trie_changes, 0).unwrap();
-        store_update.commit().expect("No io errors expected");
-        self.cur_block.state_root = new_root;
-    }
+    // /// Force alter account and change state_root.
+    // pub fn force_account_update(&mut self, account_id: AccountId, account: &Account) {
+    //     let mut trie_update = self.tries.new_trie_update(0, self.cur_block.state_root);
+    //     set_account(&mut trie_update, String::from(account_id), account);
+    //     trie_update.commit(StateChangeCause::ValidatorAccountsUpdate);
+    //     let (trie_changes, _) = trie_update.finalize().expect("Unexpected Storage error");
+    //     let (store_update, new_root) = self.tries.apply_all(&trie_changes, 0).unwrap();
+    //     store_update.commit().expect("No io errors expected");
+    //     self.cur_block.state_root = new_root;
+    // }
 
     pub fn view_account(&self, account_id: &str) -> Option<Account> {
         let trie_update = self.tries.new_trie_update(0, self.cur_block.state_root);
@@ -370,9 +410,6 @@
             function_name,
             args,
             &mut logs,
-            self.epoch_info_provider.as_ref(),
         );
         ViewResult::new(result, logs)
     }
 
     /// Returns a reference to the current block.
@@ -533,14 +570,14 @@
         assert_eq!("\"caller status is ok!\"", caller_status);
     }
 
-    #[test]
-    fn test_force_update_account() {
-        let (mut runtime, _, _) = init_runtime(None);
-        let mut bob_account = runtime.view_account("root").unwrap();
-        bob_account.locked = 10000;
-        runtime.force_account_update("root".parse().unwrap(), &bob_account);
-        assert_eq!(runtime.view_account("root").unwrap().locked, 10000);
-    }
+    // #[test]
+    // fn test_force_update_account() {
+    //     let (mut runtime, _, _) = init_runtime(None);
+    //     let mut bob_account = runtime.view_account("root").unwrap();
+    //     bob_account.locked = 10000;
+    //     runtime.force_account_update("root".parse().unwrap(), &bob_account);
+    //     assert_eq!(runtime.view_account("root").unwrap().locked, 10000);
+    // }
 
     #[test]
     fn can_produce_many_blocks_without_stack_overflow() {
diff --git a/skw-contract-sdk/skw-sdk-sim/src/user.rs b/skw-contract-sdk/skw-sdk-sim/src/user.rs
index 238102b..445d9f0 100644
--- a/skw-contract-sdk/skw-sdk-sim/src/user.rs
+++ b/skw-contract-sdk/skw-sdk-sim/src/user.rs
@@ -9,6 +9,8 @@ use skw_contract_sdk::PendingContractTx;
 use crate::runtime::init_runtime;
 pub use crate::to_yocto;
+
+// TODO: these are primitives imports
 use crate::{
     account::{AccessKey, Account},
     hash::CryptoHash,
@@ -62,12 +64,6 @@ impl UserTransaction {
         outcome_into_result(res, &self.runtime)
     }
 
-    /// Create account for the receiver of the transaction.
-    pub fn create_account(mut self) -> Self {
-        self.transaction = self.transaction.create_account();
-        self
-    }
-
     /// Deploy Wasm binary
     pub fn deploy_contract(mut self, code: Vec<u8>) -> Self {
         self.transaction = self.transaction.deploy_contract(code);
         self
@@ -85,36 +81,6 @@ impl UserTransaction {
         self.transaction = self.transaction.function_call(function_name, args, gas, deposit);
         self
     }
-
-    /// Transfer deposit to receiver
-    pub fn transfer(mut self, deposit: Balance) -> Self {
-        self.transaction = self.transaction.transfer(deposit);
-        self
-    }
-
-    /// Express interest in becoming a validator
-    pub fn stake(mut self, stake: Balance, public_key: PublicKey) -> Self {
-        self.transaction = self.transaction.stake(stake, public_key);
-        self
-    }
-
-    /// Add access key, either FunctionCall or FullAccess
-    pub fn add_key(mut self, public_key: PublicKey, access_key: AccessKey) -> Self {
-        self.transaction = self.transaction.add_key(public_key, access_key);
-        self
-    }
-
-    /// Delete an access key
-    pub fn delete_key(mut self, public_key: PublicKey) -> Self {
-        self.transaction = self.transaction.delete_key(public_key);
-        self
-    }
-
-    /// Delete an account and send remaining balance to `beneficiary_id`
-    pub fn delete_account(mut self, beneficiary_id: AccountId) -> Self {
-        self.transaction = self.transaction.delete_account(String::from(beneficiary_id));
-        self
-    }
 }
 
 /// A user that can sign transactions. It includes a signer and an account id.
@@ -149,10 +115,11 @@ impl UserAccount {
     pub fn account(&self) -> Option<Account> {
         (*self.runtime).borrow().view_account(self.account_id.as_str())
     }
-    /// Transfer yoctoNear to another account
-    pub fn transfer(&self, to: AccountId, deposit: Balance) -> ExecutionResult {
-        self.submit_transaction(self.transaction(to).transfer(deposit))
-    }
+
+    // /// Transfer yoctoNear to another account
+    // pub fn transfer(&self, to: AccountId, deposit: Balance) -> ExecutionResult {
+    //     self.submit_transaction(self.transaction(to).transfer(deposit))
+    // }
 
     /// Make a contract call. `pending_tx` includes the receiver, the method to call as well as its arguments.
     /// Note: You will most likely not be using this method directly but rather the [`call!`](./macro.call.html) macro.
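For orientation, a sketch of driving the trimmed builder end to end. It assumes the usual near-sdk-sim entry points (`init_simulator`, `UserAccount::create_transaction`, `UserTransaction::submit`, `DEFAULT_GAS`) survive the port unchanged; the account id and wasm path are illustrative:

```rust
use skw_sdk_sim::{init_simulator, DEFAULT_GAS};

fn deploy_and_init() {
    let root = init_simulator(None);
    // Only deploy_contract and function_call remain as builder actions.
    let result = root
        .create_transaction("counter.test".parse().unwrap())
        .deploy_contract(include_bytes!("../res/counter.wasm").to_vec()) // illustrative path
        .function_call("new".to_string(), b"{}".to_vec(), DEFAULT_GAS, 0)
        .submit();
    assert!(result.is_ok());
}
```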
@@ -378,7 +345,7 @@ pub fn init_simulator(genesis_config: Option<GenesisConfig>) -> UserAccount {
 /// use skw_sdk_sim::*;
 /// use fungible_token::ContractContract;
 /// use std::convert::TryInto;
-/// use near_sdk::AccountId;
+/// use skw_contract_sdk::AccountId;
 /// let master_account = skw_sdk_sim::init_simulator(None);
 /// let master_account_id: AccountId = master_account.account_id().try_into().unwrap();
 /// let initial_balance = skw_sdk_sim::to_yocto("35");
diff --git a/skw-contract-sdk/sys/src/lib.rs b/skw-contract-sdk/sys/src/lib.rs
index c2ac100..184a450 100644
--- a/skw-contract-sdk/sys/src/lib.rs
+++ b/skw-contract-sdk/sys/src/lib.rs
@@ -17,7 +17,6 @@ extern "C" {
     pub fn input(register_id: u64);
     pub fn block_number() -> u64;
     pub fn block_timestamp() -> u64;
-    pub fn epoch_height() -> u64;
     pub fn storage_usage() -> u64;
     // #################
     // # Economics API #
     // #################
@@ -83,7 +82,6 @@ extern "C" {
     // #######################
     // # Promise API actions #
     // #######################
-    pub fn promise_batch_action_create_account(promise_index: u64);
     pub fn promise_batch_action_deploy_contract(promise_index: u64, code_len: u64, code_ptr: u64);
     pub fn promise_batch_action_function_call(
         promise_index: u64,
@@ -94,12 +92,6 @@ extern "C" {
         amount_ptr: u64,
         gas: u64,
     );
-    pub fn promise_batch_action_transfer(promise_index: u64, amount_ptr: u64);
-    pub fn promise_batch_action_delete_account(
-        promise_index: u64,
-        beneficiary_id_len: u64,
-        beneficiary_id_ptr: u64,
-    );
     // #######################
     // # Promise API results #
     // #######################
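The imports above are the raw wasm host ABI: buffers are passed as `(len, ptr)` pairs of `u64`. A minimal sketch of the safe-wrapper pattern the SDK layers on top of them; the module layout and wrapper signature here are illustrative, not this crate's actual `env` API:

```rust
// Illustrative mirror of one surviving import plus its safe wrapper.
mod sys {
    extern "C" {
        pub fn promise_batch_action_deploy_contract(
            promise_index: u64,
            code_len: u64,
            code_ptr: u64,
        );
    }
}

// The wrapper flattens a Rust slice into the (len, ptr) pair the host expects.
pub fn promise_batch_action_deploy_contract(promise_index: u64, code: &[u8]) {
    unsafe {
        sys::promise_batch_action_deploy_contract(
            promise_index,
            code.len() as u64,
            code.as_ptr() as u64,
        )
    }
}
```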