From 84d3a51543fa6b2b698b3d972d31667811f70231 Mon Sep 17 00:00:00 2001 From: ananas-block Date: Mon, 20 Jan 2025 00:23:33 +0000 Subject: [PATCH] perf: optimize batched Merkle tree & output queue deserialization --- forester-utils/src/instructions.rs | 6 +- forester/src/batch_processor/common.rs | 18 +- forester/tests/batched_address_test.rs | 4 +- forester/tests/batched_state_test.rs | 4 +- program-libs/batched-merkle-tree/src/batch.rs | 102 +++-- .../batched-merkle-tree/src/batch_metadata.rs | 81 +++- .../src/initialize_state_tree.rs | 14 - .../batched-merkle-tree/src/merkle_tree.rs | 120 +++--- .../src/merkle_tree_metadata.rs | 10 + program-libs/batched-merkle-tree/src/queue.rs | 193 +++------ .../src/rollover_address_tree.rs | 4 +- .../src/rollover_state_tree.rs | 13 +- .../tests/initialize_address_tree.rs | 26 +- .../tests/initialize_state_tree.rs | 40 +- .../batched-merkle-tree/tests/merkle_tree.rs | 396 +++++++++--------- .../batched-merkle-tree/tests/queue.rs | 4 +- .../tests/rollover_address_tree.rs | 14 +- .../tests/rollover_state_tree.rs | 28 +- program-libs/zero-copy/src/slice_mut.rs | 3 + program-libs/zero-copy/src/vec.rs | 3 + .../tests/batched_merkle_tree_test.rs | 26 +- program-tests/registry-test/tests/tests.rs | 2 +- program-tests/system-cpi-test/tests/test.rs | 8 +- .../utils/src/assert_compressed_tx.rs | 45 +- program-tests/utils/src/e2e_test_env.rs | 2 + .../src/instructions/append_leaves.rs | 25 +- .../src/instructions/migrate_state.rs | 9 +- programs/system/src/invoke/verify_proof.rs | 5 +- .../program-test/src/indexer/test_indexer.rs | 2 +- .../program-test/src/test_batch_forester.rs | 36 +- 30 files changed, 600 insertions(+), 643 deletions(-) diff --git a/forester-utils/src/instructions.rs b/forester-utils/src/instructions.rs index 5ad8fe9477..ddb4ffec05 100644 --- a/forester-utils/src/instructions.rs +++ b/forester-utils/src/instructions.rs @@ -64,7 +64,7 @@ where .unwrap(); let full_batch_index = merkle_tree.queue_metadata.next_full_batch_index; - let batch = &merkle_tree.batches[full_batch_index as usize]; + let batch = &merkle_tree.queue_metadata.batches[full_batch_index as usize]; let zkp_batch_index = batch.get_num_inserted_zkps(); let leaves_hashchain = merkle_tree.hashchain_store[full_batch_index as usize][zkp_batch_index as usize]; @@ -232,7 +232,7 @@ pub async fn create_append_batch_ix_data>( let zkp_batch_size = output_queue.batch_metadata.zkp_batch_size; let num_inserted_zkps = - output_queue.batches[full_batch_index as usize].get_num_inserted_zkps(); + output_queue.batch_metadata.batches[full_batch_index as usize].get_num_inserted_zkps(); let leaves_hashchain = output_queue.hashchain_store[full_batch_index as usize][num_inserted_zkps as usize]; @@ -341,7 +341,7 @@ pub async fn create_nullify_batch_ix_data>( BatchedMerkleTreeAccount::state_from_bytes(account.data.as_mut_slice()).unwrap(); let batch_idx = merkle_tree.queue_metadata.next_full_batch_index as usize; let zkp_size = merkle_tree.queue_metadata.zkp_batch_size; - let batch = &merkle_tree.batches[batch_idx]; + let batch = &merkle_tree.queue_metadata.batches[batch_idx]; let zkp_idx = batch.get_num_inserted_zkps(); let hashchain = merkle_tree.hashchain_store[batch_idx][zkp_idx as usize]; let root = *merkle_tree.root_history.last().unwrap(); diff --git a/forester/src/batch_processor/common.rs b/forester/src/batch_processor/common.rs index ce671c9d75..cdc0c424c4 100644 --- a/forester/src/batch_processor/common.rs +++ b/forester/src/batch_processor/common.rs @@ -126,7 +126,7 @@ impl + IndexerType> 
BatchProcessor { }; let batch_index = tree.queue_metadata.next_full_batch_index; - match tree.batches.get(batch_index as usize) { + match tree.queue_metadata.batches.get(batch_index as usize) { Some(batch) => Self::calculate_completion(batch), None => 0.0, } @@ -139,7 +139,7 @@ impl + IndexerType> BatchProcessor { }; let batch_index = queue.batch_metadata.next_full_batch_index; - match queue.batches.get(batch_index as usize) { + match queue.batch_metadata.batches.get(batch_index as usize) { Some(batch) => Self::calculate_completion(batch), None => 0.0, } @@ -181,7 +181,7 @@ impl + IndexerType> BatchProcessor { let zkp_batch_size = output_queue.batch_metadata.zkp_batch_size; ( - output_queue.batches[batch_index as usize].get_num_inserted_zkps(), + output_queue.batch_metadata.batches[batch_index as usize].get_num_inserted_zkps(), zkp_batch_size as usize, ) }; @@ -206,7 +206,11 @@ impl + IndexerType> BatchProcessor { if let Ok(tree) = merkle_tree { let batch_index = tree.queue_metadata.next_full_batch_index; - let full_batch = tree.batches.get(batch_index as usize).unwrap(); + let full_batch = tree + .queue_metadata + .batches + .get(batch_index as usize) + .unwrap(); full_batch.get_state() != BatchState::Inserted && full_batch.get_current_zkp_batch_index() > full_batch.get_num_inserted_zkps() @@ -230,7 +234,11 @@ impl + IndexerType> BatchProcessor { if let Ok(queue) = output_queue { let batch_index = queue.batch_metadata.next_full_batch_index; - let full_batch = queue.batches.get(batch_index as usize).unwrap(); + let full_batch = queue + .batch_metadata + .batches + .get(batch_index as usize) + .unwrap(); full_batch.get_state() != BatchState::Inserted && full_batch.get_current_zkp_batch_index() > full_batch.get_num_inserted_zkps() diff --git a/forester/tests/batched_address_test.rs b/forester/tests/batched_address_test.rs index 8b9f27f062..d6e570796b 100644 --- a/forester/tests/batched_address_test.rs +++ b/forester/tests/batched_address_test.rs @@ -268,8 +268,8 @@ async fn test_address_batched() { let num_zkp_batches = batch_size / zkp_batch_size; let mut completed_items = 0; - for batch_idx in 0..merkle_tree.batches.len() { - let batch = merkle_tree.batches.get(batch_idx).unwrap(); + for batch_idx in 0..merkle_tree.queue_metadata.batches.len() { + let batch = merkle_tree.queue_metadata.batches.get(batch_idx).unwrap(); if batch.get_state() == BatchState::Inserted { completed_items += batch_size; } diff --git a/forester/tests/batched_state_test.rs b/forester/tests/batched_state_test.rs index 6d781910f4..f0d5c48c61 100644 --- a/forester/tests/batched_state_test.rs +++ b/forester/tests/batched_state_test.rs @@ -286,8 +286,8 @@ async fn test_state_batched() { let num_zkp_batches = batch_size / zkp_batch_size; let mut completed_items = 0; - for batch_idx in 0..output_queue.batches.len() { - let batch = output_queue.batches.get(batch_idx).unwrap(); + for batch_idx in 0..output_queue.batch_metadata.batches.len() { + let batch = output_queue.batch_metadata.batches.get(batch_idx).unwrap(); if batch.get_state() == BatchState::Inserted { completed_items += batch_size; } diff --git a/program-libs/batched-merkle-tree/src/batch.rs b/program-libs/batched-merkle-tree/src/batch.rs index 319bf51d14..e4b231effd 100644 --- a/program-libs/batched-merkle-tree/src/batch.rs +++ b/program-libs/batched-merkle-tree/src/batch.rs @@ -1,6 +1,7 @@ +use borsh::{BorshDeserialize, BorshSerialize}; use light_bloom_filter::BloomFilter; use light_hasher::{Hasher, Poseidon}; -use 
light_zero_copy::{slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64}; +use light_zero_copy::vec::ZeroCopyVecU64; use solana_program::msg; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; @@ -43,13 +44,26 @@ impl From for u64 { /// - is part of a queue, by default a queue has two batches. /// - is inserted into the tree by zkp batch. #[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, Eq, KnownLayout, Immutable, IntoBytes, FromBytes)] +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + KnownLayout, + Immutable, + IntoBytes, + FromBytes, + Default, + BorshSerialize, + BorshDeserialize, +)] pub struct Batch { /// Number of inserted elements in the zkp batch. num_inserted: u64, state: u64, current_zkp_batch_index: u64, - num_inserted_zkps: u64, + pub num_inserted_zkps: u64, /// Number of iterations for the bloom_filter. pub num_iters: u64, /// Theoretical capacity of the bloom_filter. We want to make it much larger @@ -234,7 +248,7 @@ impl Batch { &mut self, bloom_filter_value: &[u8; 32], hashchain_value: &[u8; 32], - bloom_filter_stores: &mut [ZeroCopySliceMutU64], + bloom_filter_stores: &mut [&mut [u8]], hashchain_store: &mut ZeroCopyVecU64<[u8; 32]>, bloom_filter_index: usize, ) -> Result<(), BatchedMerkleTreeError> { @@ -251,13 +265,18 @@ impl Batch { BloomFilter::new( self.num_iters as usize, self.bloom_filter_capacity, - bloom_filter.as_mut_slice(), + bloom_filter, )? .insert(bloom_filter_value)?; // 3. Check that value is not in any other bloom filter. for bf_store in before.iter_mut().chain(after.iter_mut()) { - self.check_non_inclusion(bloom_filter_value, bf_store.as_mut_slice())?; + Self::check_non_inclusion( + self.num_iters as usize, + self.bloom_filter_capacity, + bloom_filter_value, + bf_store, + )?; } } Ok(()) @@ -310,12 +329,12 @@ impl Batch { /// Checks that value is not in the bloom filter. 
pub fn check_non_inclusion( - &self, + num_iters: usize, + bloom_filter_capacity: u64, value: &[u8; 32], store: &mut [u8], ) -> Result<(), BatchedMerkleTreeError> { - let mut bloom_filter = - BloomFilter::new(self.num_iters as usize, self.bloom_filter_capacity, store)?; + let mut bloom_filter = BloomFilter::new(num_iters, bloom_filter_capacity, store)?; if bloom_filter.contains(value) { return Err(BatchedMerkleTreeError::NonInclusionCheckFailed); } @@ -475,10 +494,10 @@ mod tests { fn test_insert() { // Behavior Input queue let mut batch = get_test_batch(); - let mut stores = vec![vec![0u8; 20_008]; 2]; + let mut stores = vec![vec![0u8; 20_000]; 2]; let mut bloom_filter_stores = stores .iter_mut() - .map(|store| ZeroCopySliceMutU64::new(20_000, store).unwrap()) + .map(|store| &mut store[..]) .collect::>(); let mut hashchain_store_bytes = vec![ 0u8; @@ -541,19 +560,24 @@ mod tests { let mut bloom_filter = BloomFilter { num_iters: batch.num_iters as usize, capacity: batch.bloom_filter_capacity, - store: bloom_filter_stores[processing_index].as_mut_slice(), + store: bloom_filter_stores[processing_index], }; assert!(bloom_filter.contains(&value)); let other_index = if processing_index == 0 { 1 } else { 0 }; - batch - .check_non_inclusion(&value, bloom_filter_stores[other_index].as_mut_slice()) - .unwrap(); - batch - .check_non_inclusion( - &value, - bloom_filter_stores[processing_index].as_mut_slice(), - ) - .unwrap_err(); + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[other_index], + ) + .unwrap(); + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[processing_index], + ) + .unwrap_err(); ref_batch.num_inserted += 1; if ref_batch.num_inserted == ref_batch.zkp_batch_size { @@ -611,10 +635,10 @@ mod tests { let mut batch = get_test_batch(); let value = [1u8; 32]; - let mut stores = vec![vec![0u8; 20_008]; 2]; + let mut stores = vec![vec![0u8; 20_000]; 2]; let mut bloom_filter_stores = stores .iter_mut() - .map(|store| ZeroCopySliceMutU64::new(20_000, store).unwrap()) + .map(|store| &mut store[..]) .collect::>(); let mut hashchain_store_bytes = vec![ 0u8; @@ -628,9 +652,15 @@ mod tests { ) .unwrap(); - assert!(batch - .check_non_inclusion(&value, bloom_filter_stores[processing_index].as_mut_slice()) - .is_ok()); + assert_eq!( + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[processing_index] + ), + Ok(()) + ); let ref_batch = get_test_batch(); assert_eq!(batch, ref_batch); batch @@ -642,14 +672,22 @@ mod tests { processing_index, ) .unwrap(); - assert!(batch - .check_non_inclusion(&value, bloom_filter_stores[processing_index].as_mut_slice()) - .is_err()); + assert!(Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[processing_index] + ) + .is_err()); let other_index = if processing_index == 0 { 1 } else { 0 }; - assert!(batch - .check_non_inclusion(&value, bloom_filter_stores[other_index].as_mut_slice()) - .is_ok()); + assert!(Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[other_index] + ) + .is_ok()); } } diff --git a/program-libs/batched-merkle-tree/src/batch_metadata.rs b/program-libs/batched-merkle-tree/src/batch_metadata.rs index f72e0e4462..9baa79aded 100644 --- a/program-libs/batched-merkle-tree/src/batch_metadata.rs +++ 
b/program-libs/batched-merkle-tree/src/batch_metadata.rs @@ -1,5 +1,5 @@ use light_merkle_tree_metadata::{errors::MerkleTreeMetadataError, queue::QueueType}; -use light_zero_copy::{slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64}; +use light_zero_copy::vec::ZeroCopyVecU64; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use crate::{ @@ -37,6 +37,7 @@ pub struct BatchMetadata { pub currently_processing_batch_index: u64, /// Next batch to be inserted into the tree. pub next_full_batch_index: u64, + pub batches: [Batch; 2], } impl BatchMetadata { @@ -45,6 +46,14 @@ impl BatchMetadata { self.batch_size / self.zkp_batch_size } + pub fn get_current_batch(&self) -> &Batch { + &self.batches[self.currently_processing_batch_index as usize] + } + + pub fn get_current_batch_mut(&mut self) -> &mut Batch { + &mut self.batches[self.currently_processing_batch_index as usize] + } + /// Validates that the batch size is properly divisible by the ZKP batch size. fn validate_batch_sizes( batch_size: u64, @@ -68,7 +77,12 @@ impl BatchMetadata { batch_size, currently_processing_batch_index: 0, next_full_batch_index: 0, + // Output queues don't use bloom filters. bloom_filter_capacity: 0, + batches: [ + Batch::new(0, 0, batch_size, zkp_batch_size, 0), + Batch::new(0, 0, batch_size, zkp_batch_size, batch_size), + ], }) } @@ -77,6 +91,8 @@ impl BatchMetadata { bloom_filter_capacity: u64, zkp_batch_size: u64, num_batches: u64, + num_iters: u64, + start_index: u64, ) -> Result { Self::validate_batch_sizes(batch_size, zkp_batch_size)?; @@ -87,6 +103,22 @@ impl BatchMetadata { currently_processing_batch_index: 0, next_full_batch_index: 0, bloom_filter_capacity, + batches: [ + Batch::new( + num_iters, + bloom_filter_capacity, + batch_size, + zkp_batch_size, + start_index, + ), + Batch::new( + num_iters, + bloom_filter_capacity, + batch_size, + zkp_batch_size, + batch_size + start_index, + ), + ], }) } @@ -98,7 +130,8 @@ impl BatchMetadata { } /// Increment the currently_processing_batch_index if current state is BatchState::Full. - pub fn increment_currently_processing_batch_index_if_full(&mut self, state: BatchState) { + pub fn increment_currently_processing_batch_index_if_full(&mut self) { + let state = self.get_current_batch().get_state(); if state == BatchState::Full { self.currently_processing_batch_index = (self.currently_processing_batch_index + 1) % self.num_batches; @@ -151,29 +184,22 @@ impl BatchMetadata { } else { BatchedQueueMetadata::LEN }; - let batches_size = - ZeroCopySliceMutU64::::required_size_for_capacity(self.num_batches); let value_vecs_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(self.batch_size) * num_value_vec; // Bloomfilter capacity is in bits. 
let bloom_filter_stores_size = - ZeroCopySliceMutU64::::required_size_for_capacity(self.bloom_filter_capacity / 8) - * num_bloom_filter_stores; + (self.bloom_filter_capacity / 8) as usize * num_bloom_filter_stores; let hashchain_store_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(self.get_num_zkp_batches()) * num_hashchain_store; - let size = account_size - + batches_size - + value_vecs_size - + bloom_filter_stores_size - + hashchain_store_size; + let size = account_size + value_vecs_size + bloom_filter_stores_size + hashchain_store_size; Ok(size) } } #[test] fn test_increment_next_full_batch_index_if_inserted() { - let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2).unwrap(); + let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2, 3, 0).unwrap(); assert_eq!(metadata.next_full_batch_index, 0); // increment next full batch index metadata.increment_next_full_batch_index_if_inserted(BatchState::Inserted); @@ -190,28 +216,45 @@ fn test_increment_next_full_batch_index_if_inserted() { #[test] fn test_increment_currently_processing_batch_index_if_full() { - let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2).unwrap(); + let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2, 3, 0).unwrap(); assert_eq!(metadata.currently_processing_batch_index, 0); + metadata + .get_current_batch_mut() + .advance_state_to_full() + .unwrap(); // increment currently_processing_batch_index - metadata.increment_currently_processing_batch_index_if_full(BatchState::Full); + metadata.increment_currently_processing_batch_index_if_full(); assert_eq!(metadata.currently_processing_batch_index, 1); + assert_eq!(metadata.next_full_batch_index, 0); + metadata + .get_current_batch_mut() + .advance_state_to_full() + .unwrap(); // increment currently_processing_batch_index - metadata.increment_currently_processing_batch_index_if_full(BatchState::Full); + metadata.increment_currently_processing_batch_index_if_full(); assert_eq!(metadata.currently_processing_batch_index, 0); + metadata + .get_current_batch_mut() + .advance_state_to_inserted() + .unwrap(); // try incrementing next full batch index with state not full - metadata.increment_currently_processing_batch_index_if_full(BatchState::Fill); + metadata.increment_currently_processing_batch_index_if_full(); assert_eq!(metadata.currently_processing_batch_index, 0); - metadata.increment_currently_processing_batch_index_if_full(BatchState::Inserted); + metadata + .get_current_batch_mut() + .advance_state_to_fill() + .unwrap(); + metadata.increment_currently_processing_batch_index_if_full(); assert_eq!(metadata.currently_processing_batch_index, 0); } #[test] fn test_batch_size_validation() { // Test invalid batch size - assert!(BatchMetadata::new_input_queue(10, 10, 3, 2).is_err()); + assert!(BatchMetadata::new_input_queue(10, 10, 3, 2, 3, 0).is_err()); assert!(BatchMetadata::new_output_queue(10, 3, 2).is_err()); // Test valid batch size - assert!(BatchMetadata::new_input_queue(9, 10, 3, 2).is_ok()); + assert!(BatchMetadata::new_input_queue(9, 10, 3, 2, 3, 0).is_ok()); assert!(BatchMetadata::new_output_queue(9, 3, 2).is_ok()); } diff --git a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs index 8f4accca38..08bc860685 100644 --- a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs @@ -312,14 +312,12 @@ pub fn get_state_merkle_tree_account_size_from_params( pub fn 
assert_state_mt_zero_copy_inited( account_data: &mut [u8], ref_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, - num_iters: u64, ) { let account = BatchedMerkleTreeAccount::state_from_bytes(account_data) .expect("from_bytes_unchecked_mut failed"); _assert_mt_zero_copy_inited::<{ crate::constants::BATCHED_STATE_TREE_TYPE }>( account, ref_account, - num_iters, TreeType::BatchedState as u64, ); } @@ -328,7 +326,6 @@ pub fn assert_state_mt_zero_copy_inited( pub fn assert_address_mt_zero_copy_inited( account_data: &mut [u8], ref_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, - num_iters: u64, ) { use crate::{constants::BATCHED_ADDRESS_TREE_TYPE, merkle_tree::BatchedMerkleTreeAccount}; @@ -337,7 +334,6 @@ pub fn assert_address_mt_zero_copy_inited( _assert_mt_zero_copy_inited::( account, ref_account, - num_iters, TreeType::Address as u64, ); } @@ -346,7 +342,6 @@ pub fn assert_address_mt_zero_copy_inited( fn _assert_mt_zero_copy_inited( mut account: BatchedMerkleTreeAccount, ref_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, - num_iters: u64, tree_type: u64, ) { use light_hasher::Hasher; @@ -354,7 +349,6 @@ fn _assert_mt_zero_copy_inited( let queue = account.queue_metadata; let ref_queue = ref_account.queue_metadata; let num_batches = ref_queue.num_batches as usize; - let mut next_index = account.next_index; assert_eq!(*account, ref_account, "metadata mismatch"); assert_eq!( @@ -382,10 +376,6 @@ fn _assert_mt_zero_copy_inited( "hashchain_store mismatch" ); - if tree_type == TreeType::BatchedAddress as u64 { - next_index = 2; - } - let queue_type = if tree_type == TreeType::BatchedState as u64 { QueueType::BatchedInput as u64 } else { @@ -396,11 +386,7 @@ fn _assert_mt_zero_copy_inited( ref_queue, queue_type, &mut account.value_vecs, - &mut account.bloom_filter_stores, - &mut account.batches, num_batches, - num_iters, - next_index, ); } diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs index 99da3fb078..00b61cfec3 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs @@ -16,8 +16,7 @@ use light_verifier::{ CompressedProof, }; use light_zero_copy::{ - cyclic_vec::ZeroCopyCyclicVecU64, errors::ZeroCopyError, slice_mut::ZeroCopySliceMutU64, - vec::ZeroCopyVecU64, + cyclic_vec::ZeroCopyCyclicVecU64, errors::ZeroCopyError, vec::ZeroCopyVecU64, }; use solana_program::{account_info::AccountInfo, msg}; use zerocopy::Ref; @@ -85,9 +84,8 @@ pub struct InstructionDataBatchAppendInputs { pub struct BatchedMerkleTreeAccount<'a> { metadata: Ref<&'a mut [u8], BatchedMerkleTreeMetadata>, pub root_history: ZeroCopyCyclicVecU64<'a, [u8; 32]>, - pub batches: ZeroCopySliceMutU64<'a, Batch>, pub value_vecs: Vec>, - pub bloom_filter_stores: Vec>, + pub bloom_filter_stores: Vec<&'a mut [u8]>, pub hashchain_store: Vec>, } @@ -186,7 +184,6 @@ impl<'a> BatchedMerkleTreeAccount<'a> { fn from_bytes( account_data: &'a mut [u8], ) -> Result, BatchedMerkleTreeError> { - let account_data_len = account_data.len(); // Discriminator is already checked in check_account_info. let (_discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); let (metadata, account_data) = @@ -195,12 +192,9 @@ impl<'a> BatchedMerkleTreeAccount<'a> { if metadata.tree_type != TREE_TYPE { return Err(MerkleTreeMetadataError::InvalidTreeType.into()); } - if account_data_len != metadata.get_account_size()? 
{ - return Err(ZeroCopyError::InvalidAccountSize.into()); - } let (root_history, account_data) = ZeroCopyCyclicVecU64::from_bytes_at(account_data)?; - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = input_queue_from_bytes( + let (value_vecs, bloom_filter_stores, hashchain_store) = input_queue_from_bytes( &metadata.queue_metadata, account_data, QueueType::BatchedInput as u64, @@ -209,7 +203,6 @@ impl<'a> BatchedMerkleTreeAccount<'a> { Ok(BatchedMerkleTreeAccount { metadata, root_history, - batches, value_vecs, bloom_filter_stores, hashchain_store, @@ -246,6 +239,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { input_queue_batch_size, input_queue_zkp_batch_size, )?; + account_metadata.queue_metadata.bloom_filter_capacity = bloom_filter_capacity; if account_data_len != account_metadata.get_account_size()? { msg!("merkle_tree_metadata: {:?}", account_metadata); @@ -277,19 +271,31 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // The initialized indexed Merkle tree contains two elements. account_metadata.next_index = 2; } + let next_index = account_metadata.next_index; - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue( + for (i, batches) in account_metadata + .queue_metadata + .batches + .iter_mut() + .enumerate() + { + *batches = Batch::new( + num_iters, + bloom_filter_capacity, + input_queue_batch_size, + input_queue_zkp_batch_size, + input_queue_batch_size * (i as u64) + next_index, + ); + } + + let (value_vecs, bloom_filter_stores, hashchain_store, _) = init_queue( &account_metadata.queue_metadata, QueueType::BatchedInput as u64, account_data, - num_iters, - bloom_filter_capacity, - account_metadata.next_index, )?; Ok(BatchedMerkleTreeAccount { metadata: account_metadata, root_history, - batches, value_vecs, bloom_filter_stores, hashchain_store, @@ -343,8 +349,8 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let new_root = instruction_data.new_root; let circuit_batch_size = queue_account.batch_metadata.zkp_batch_size; let start_index = self.next_index; - let full_batch = &mut queue_account.batches[full_batch_index]; - let num_zkps = full_batch.get_first_ready_zkp_batch()?; + let num_zkps = + queue_account.batch_metadata.batches[full_batch_index].get_first_ready_zkp_batch()?; // 1. Create public inputs hash. let public_input_hash = { @@ -376,12 +382,14 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // Update metadata and batch. { + println!("pre mark_as_inserted_in_merkle_tree -------------------------------"); // 3. Mark zkp batch as inserted in the merkle tree. - let full_batch_state = full_batch.mark_as_inserted_in_merkle_tree( - self.metadata.sequence_number, - root_index, - self.root_history_capacity, - )?; + let full_batch_state = queue_account.batch_metadata.batches[full_batch_index] + .mark_as_inserted_in_merkle_tree( + self.metadata.sequence_number, + root_index, + self.root_history_capacity, + )?; // 4. Increment next full batch index if inserted. 
queue_account .batch_metadata @@ -445,7 +453,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { id: [u8; 32], ) -> Result { let full_batch_index = self.queue_metadata.next_full_batch_index as usize; - let num_zkps = self.batches[full_batch_index].get_first_ready_zkp_batch()?; + let num_zkps = self.queue_metadata.batches[full_batch_index].get_first_ready_zkp_batch()?; let new_root = instruction_data.new_root; let circuit_batch_size = self.queue_metadata.zkp_batch_size; @@ -488,11 +496,12 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let root_history_capacity = self.root_history_capacity; let sequence_number = self.sequence_number; // 3. Mark batch as inserted in the merkle tree. - let full_batch_state = self.batches[full_batch_index].mark_as_inserted_in_merkle_tree( - sequence_number, - root_index, - root_history_capacity, - )?; + let full_batch_state = self.queue_metadata.batches[full_batch_index] + .mark_as_inserted_in_merkle_tree( + sequence_number, + root_index, + root_history_capacity, + )?; // 4. Zero out previous batch bloom filter // if current batch is 50% inserted. @@ -619,7 +628,6 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let (root_index, sequence_number) = insert_into_current_batch( QueueType::BatchedInput as u64, &mut self.metadata.queue_metadata, - &mut self.batches, &mut self.value_vecs, &mut self.bloom_filter_stores, &mut self.hashchain_store, @@ -684,8 +692,8 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// which can prove inclusion of a value inserted in the queue. /// 1. Check whether overlapping roots exist. /// 2. If yes: - /// 2.1 Get, first safe root index. - /// 2.2 Zero out roots from the oldest root to first safe root. + /// 2.1. Get, first safe root index. + /// 2.2. Zero out roots from the oldest root to first safe root. fn zero_out_roots(&mut self, sequence_number: u64, first_safe_root_index: u32) { // 1. Check whether overlapping roots exist. let overlapping_roots_exits = sequence_number > self.sequence_number; @@ -737,9 +745,9 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// 1. Previous batch must be inserted and bloom filter must not be zeroed out. /// 2. Current batch must be 50% full /// 3. if yes - /// 3.1 zero out bloom filter - /// 3.2 mark bloom filter as zeroed - /// 3.3 zero out roots if needed + /// 3.1. mark bloom filter as zeroed + /// 3.2. zero out bloom filter + /// 3.3. zero out roots if needed fn zero_out_previous_batch_bloom_filter(&mut self) -> Result<(), BatchedMerkleTreeError> { let current_batch = self.queue_metadata.next_full_batch_index as usize; let batch_size = self.queue_metadata.batch_size; @@ -749,13 +757,9 @@ impl<'a> BatchedMerkleTreeAccount<'a> { } else { previous_full_batch_index }; - let current_batch_is_half_full = { - let num_inserted_elements = self - .batches - .get(current_batch) - .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)? 
- .get_num_inserted_elements(); + let num_inserted_elements = + self.queue_metadata.batches[current_batch].get_num_inserted_elements(); // Keep for finegrained unit test println!("current_batch: {}", current_batch); println!("previous_full_batch_index: {}", previous_full_batch_index); @@ -770,6 +774,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { }; let previous_full_batch = self + .queue_metadata .batches .get_mut(previous_full_batch_index) .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; @@ -783,22 +788,22 @@ impl<'a> BatchedMerkleTreeAccount<'a> { println!("Wiping bloom filter of previous batch"); println!("current_batch: {}", current_batch); println!("previous_full_batch_index: {}", previous_full_batch_index); - // 3.1 Zero out bloom filter. + // 3.1. Mark bloom filter zeroed. + previous_full_batch.set_bloom_filter_to_zeroed(); + let seq = previous_full_batch.sequence_number; + let root_index = previous_full_batch.root_index; + // 3.2. Zero out bloom filter. { let bloom_filter = self .bloom_filter_stores .get_mut(previous_full_batch_index) .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; - bloom_filter.as_mut_slice().iter_mut().for_each(|x| *x = 0); + bloom_filter.iter_mut().for_each(|x| *x = 0); } - // 3.2 Mark bloom filter zeroed. - previous_full_batch.set_bloom_filter_to_zeroed(); - // 3.3 Zero out roots if a root exists in root history + // 3.3. Zero out roots if a root exists in root history // which allows to prove inclusion of a value // that was inserted into the bloom filter just zeroed out. { - let seq = previous_full_batch.sequence_number; - let root_index = previous_full_batch.root_index; self.zero_out_roots(seq, root_index); } } @@ -833,13 +838,13 @@ impl<'a> BatchedMerkleTreeAccount<'a> { &mut self, value: &[u8; 32], ) -> Result<(), BatchedMerkleTreeError> { - let num_bloom_filters = self.bloom_filter_stores.len(); - for i in 0..num_bloom_filters { - let bloom_filter_store = self.bloom_filter_stores[i].as_mut_slice(); - let batch = &self.batches[i]; - if !batch.bloom_filter_is_zeroed() { - batch.check_non_inclusion(value, bloom_filter_store)?; - } + for i in 0..2 { + Batch::check_non_inclusion( + self.queue_metadata.batches[i].num_iters as usize, + self.queue_metadata.batches[i].bloom_filter_capacity, + value, + self.bloom_filter_stores[i], + )?; } Ok(()) } @@ -909,7 +914,11 @@ pub fn assert_nullify_event( mt_pubkey: Pubkey, ) { let batch_index = old_account.queue_metadata.next_full_batch_index; - let batch = old_account.batches.get(batch_index as usize).unwrap(); + let batch = old_account + .queue_metadata + .batches + .get(batch_index as usize) + .unwrap(); let ref_event = BatchNullifyEvent { id: mt_pubkey.to_bytes(), batch_index, @@ -933,6 +942,7 @@ pub fn assert_batch_append_event_event( .batch_metadata .next_full_batch_index; let batch = old_output_queue_account + .batch_metadata .batches .get(batch_index as usize) .unwrap(); diff --git a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs index 08429a2fb0..9e0af07b82 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs @@ -120,6 +120,7 @@ impl BatchedMerkleTreeMetadata { root_history_capacity, height, num_batches, + num_iters, } = params; Self { metadata: MerkleTreeMetadata { @@ -145,6 +146,12 @@ impl BatchedMerkleTreeMetadata { bloom_filter_capacity, zkp_batch_size, num_batches, + num_iters, + if tree_type == TreeType::BatchedAddress { + 2 + } else { + 0 
+                },
             )
             .unwrap(),
             capacity: 2u64.pow(height),
@@ -166,6 +173,7 @@ pub struct CreateTreeParams {
     pub root_history_capacity: u32,
     pub height: u32,
     pub num_batches: u64,
+    pub num_iters: u64,
 }
 impl CreateTreeParams {
     pub fn from_state_ix_params(data: InitStateTreeAccountsInstructionData, owner: Pubkey) -> Self {
@@ -182,6 +190,7 @@ impl CreateTreeParams {
             root_history_capacity: data.root_history_capacity,
             height: data.height,
             num_batches: data.input_queue_num_batches,
+            num_iters: data.bloom_filter_num_iters,
         }
     }
 
@@ -202,6 +211,7 @@ impl CreateTreeParams {
             root_history_capacity: data.root_history_capacity,
             height: data.height,
             num_batches: data.input_queue_num_batches,
+            num_iters: data.bloom_filter_num_iters,
         }
     }
 }
diff --git a/program-libs/batched-merkle-tree/src/queue.rs b/program-libs/batched-merkle-tree/src/queue.rs
index fcaea2091f..869599e5e1 100644
--- a/program-libs/batched-merkle-tree/src/queue.rs
+++ b/program-libs/batched-merkle-tree/src/queue.rs
@@ -10,7 +10,7 @@ use light_utils::{
     account::{check_account_info, check_discriminator, set_discriminator, DISCRIMINATOR_LEN},
     pubkey::Pubkey,
 };
-use light_zero_copy::{errors::ZeroCopyError, slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64};
+use light_zero_copy::{errors::ZeroCopyError, vec::ZeroCopyVecU64};
 use solana_program::{account_info::AccountInfo, msg};
 use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Ref};
 
@@ -63,11 +63,21 @@ impl BatchedQueueMetadata {
         batch_size: u64,
         zkp_batch_size: u64,
         bloom_filter_capacity: u64,
+        num_iters: u64,
     ) -> Result<(), BatchedMerkleTreeError> {
         self.metadata = meta_data;
         self.batch_metadata
             .init(num_batches, batch_size, zkp_batch_size)?;
         self.batch_metadata.bloom_filter_capacity = bloom_filter_capacity;
+        for (i, batches) in self.batch_metadata.batches.iter_mut().enumerate() {
+            *batches = Batch::new(
+                num_iters,
+                bloom_filter_capacity,
+                batch_size,
+                zkp_batch_size,
+                batch_size * (i as u64),
+            );
+        }
         Ok(())
     }
 }
@@ -117,9 +127,7 @@ impl BatchedQueueMetadata {
 #[derive(Debug, PartialEq)]
 pub struct BatchedQueueAccount<'a> {
     metadata: Ref<&'a mut [u8], BatchedQueueMetadata>,
-    pub batches: ZeroCopySliceMutU64<'a, Batch>,
     pub value_vecs: Vec<ZeroCopyVecU64<'a, [u8; 32]>>,
-    pub bloom_filter_stores: Vec<ZeroCopySliceMutU64<'a, u8>>,
     pub hashchain_store: Vec<ZeroCopyVecU64<'a, [u8; 32]>>,
 }
 
@@ -163,15 +171,14 @@ impl<'a> BatchedQueueAccount<'a> {
     pub fn output_from_bytes(
         account_data: &'a mut [u8],
     ) -> Result<BatchedQueueAccount<'a>, BatchedMerkleTreeError> {
+        check_discriminator::(&account_data[..DISCRIMINATOR_LEN])?;
         Self::from_bytes::(account_data)
     }
 
     fn from_bytes<const QUEUE_TYPE: u64>(
         account_data: &'a mut [u8],
     ) -> Result<BatchedQueueAccount<'a>, BatchedMerkleTreeError> {
-        let (discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN);
-        check_discriminator::(discriminator)?;
-
+        let (_discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN);
         let (metadata, account_data) =
             Ref::<&'a mut [u8], BatchedQueueMetadata>::from_prefix(account_data)
                 .map_err(|e| BatchedMerkleTreeError::ZeroCopyCastError(e.to_string()))?;
@@ -179,20 +186,14 @@ impl<'a> BatchedQueueAccount<'a> {
         if metadata.metadata.queue_type != QUEUE_TYPE {
             return Err(MerkleTreeMetadataError::InvalidQueueType.into());
        }
-        let (num_value_stores, num_stores, num_hashchain_stores) =
+        let (num_value_stores, _num_stores, num_hashchain_stores) =
             metadata.get_size_parameters()?;
-        let (batches, value_vecs, bloom_filter_stores, hashchain_store) = output_queue_from_bytes(
-            num_value_stores,
-            num_stores,
-            num_hashchain_stores,
-            account_data,
-        )?;
+        let (value_vecs, hashchain_store) =
+            output_queue_from_bytes(num_value_stores,
num_hashchain_stores, account_data)?; Ok(BatchedQueueAccount { metadata, - batches, value_vecs, - bloom_filter_stores, hashchain_store, }) } @@ -220,7 +221,9 @@ impl<'a> BatchedQueueAccount<'a> { output_queue_batch_size, output_queue_zkp_batch_size, bloom_filter_capacity, + num_iters, )?; + if account_data_len != account_metadata .batch_metadata @@ -236,19 +239,14 @@ impl<'a> BatchedQueueAccount<'a> { return Err(ZeroCopyError::InvalidAccountSize.into()); } - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue( + let (value_vecs, _bloom_filter_stores, hashchain_store, _) = init_queue( &account_metadata.batch_metadata, account_metadata.metadata.queue_type, account_data, - num_iters, - bloom_filter_capacity, - 0, )?; Ok(BatchedQueueAccount { metadata: account_metadata, - batches, value_vecs, - bloom_filter_stores, hashchain_store, }) } @@ -266,9 +264,8 @@ impl<'a> BatchedQueueAccount<'a> { insert_into_current_batch( self.metadata.metadata.queue_type, &mut self.metadata.batch_metadata, - &mut self.batches, &mut self.value_vecs, - self.bloom_filter_stores.as_mut_slice(), + &mut [], &mut self.hashchain_store, hash_chain_value, None, @@ -293,7 +290,7 @@ impl<'a> BatchedQueueAccount<'a> { leaf_index: u64, hash_chain_value: &[u8; 32], ) -> Result { - for (batch_index, batch) in self.batches.iter().enumerate() { + for (batch_index, batch) in self.batch_metadata.batches.iter().enumerate() { if batch.leaf_index_could_exist_in_batch(leaf_index)? { let index = batch.get_value_index_in_batch(leaf_index)?; let element = self.value_vecs[batch_index] @@ -319,7 +316,7 @@ impl<'a> BatchedQueueAccount<'a> { &mut self, leaf_index: u64, ) -> Result<(), BatchedMerkleTreeError> { - for batch in self.batches.iter() { + for batch in self.batch_metadata.batches.iter() { let res = batch.leaf_index_could_exist_in_batch(leaf_index)?; if res { return Ok(()); @@ -336,7 +333,7 @@ impl<'a> BatchedQueueAccount<'a> { leaf_index: u64, hash_chain_value: &[u8; 32], ) -> Result<(), BatchedMerkleTreeError> { - for (batch_index, batch) in self.batches.iter().enumerate() { + for (batch_index, batch) in self.batch_metadata.batches.iter().enumerate() { if batch.leaf_index_could_exist_in_batch(leaf_index)? { let index = batch.get_value_index_in_batch(leaf_index)?; let element = self.value_vecs[batch_index] @@ -365,7 +362,7 @@ impl<'a> BatchedQueueAccount<'a> { /// Returns the number of elements inserted in the current batch. pub fn get_num_inserted_in_current_batch(&self) -> u64 { let next_full_batch = self.batch_metadata.currently_processing_batch_index as usize; - self.batches[next_full_batch].get_num_inserted_elements() + self.batch_metadata.batches[next_full_batch].get_num_inserted_elements() } /// Returns true if the pubkey is the associated Merkle tree of the queue. 
@@ -423,9 +420,8 @@ impl DerefMut for BatchedQueueAccount<'_>
 pub(crate) fn insert_into_current_batch(
     queue_type: u64,
     batch_metadata: &mut BatchMetadata,
-    batches: &mut ZeroCopySliceMutU64<Batch>,
     value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>],
-    bloom_filter_stores: &mut [ZeroCopySliceMutU64<u8>],
+    bloom_filter_stores: &mut [&mut [u8]],
     hashchain_store: &mut [ZeroCopyVecU64<[u8; 32]>],
     hash_chain_value: &[u8; 32],
     bloom_filter_value: Option<&[u8; 32]>,
@@ -436,9 +432,7 @@ pub(crate) fn insert_into_current_batch(
     let batch_index = batch_metadata.currently_processing_batch_index as usize;
     let mut value_store = value_vecs.get_mut(batch_index);
     let mut hashchain_store = hashchain_store.get_mut(batch_index);
-    let current_batch = batches
-        .get_mut(batch_index)
-        .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?;
+    let current_batch = batch_metadata.get_current_batch_mut();
     // 1. Check that the current batch is ready.
     //    1.1. If the current batch is inserted, clear the batch.
     {
@@ -477,7 +471,7 @@ pub(crate) fn insert_into_current_batch(
             }
         } else {
             // We expect to insert into the current batch.
-            for batch in batches.iter_mut() {
+            for batch in batch_metadata.batches.iter() {
                 msg!("batch {:?}", batch);
             }
             return Err(BatchedMerkleTreeError::BatchNotReady);
@@ -503,36 +497,32 @@ pub(crate) fn insert_into_current_batch(
     }?;
 
     // 3. If batch is full, increment currently_processing_batch_index.
-    batch_metadata.increment_currently_processing_batch_index_if_full(current_batch.get_state());
+    batch_metadata.increment_currently_processing_batch_index_if_full();
 
     Ok((root_index, sequence_number))
 }
 
+#[inline(always)]
 #[allow(clippy::type_complexity)]
 pub(crate) fn output_queue_from_bytes(
     num_value_stores: usize,
-    num_stores: usize,
     num_hashchain_stores: usize,
     account_data: &mut [u8],
 ) -> Result<
     (
-        ZeroCopySliceMutU64<'_, Batch>,
         Vec<ZeroCopyVecU64<'_, [u8; 32]>>,
-        Vec<ZeroCopySliceMutU64<'_, u8>>,
         Vec<ZeroCopyVecU64<'_, [u8; 32]>>,
     ),
     BatchedMerkleTreeError,
 > {
-    let (batches, account_data) = ZeroCopySliceMutU64::from_bytes_at(account_data)?;
     let (value_vecs, account_data) =
         ZeroCopyVecU64::from_bytes_at_multiple(num_value_stores, account_data)?;
-    let (bloom_filter_stores, account_data) =
-        ZeroCopySliceMutU64::from_bytes_at_multiple(num_stores, account_data)?;
     let (hashchain_store, _) =
         ZeroCopyVecU64::from_bytes_at_multiple(num_hashchain_stores, account_data)?;
-    Ok((batches, value_vecs, bloom_filter_stores, hashchain_store))
+    Ok((value_vecs, hashchain_store))
 }
 
+#[inline(always)]
 #[allow(clippy::type_complexity)]
 pub(crate) fn input_queue_from_bytes<'a>(
     batch_metadata: &BatchMetadata,
     account_data: &'a mut [u8],
     queue_type: u64,
 ) -> Result<
     (
-        ZeroCopySliceMutU64<'a, Batch>,
         Vec<ZeroCopyVecU64<'a, [u8; 32]>>,
-        Vec<ZeroCopySliceMutU64<'a, u8>>,
+        Vec<&'a mut [u8]>,
         Vec<ZeroCopyVecU64<'a, [u8; 32]>>,
     ),
     BatchedMerkleTreeError,
 > {
-    let (num_value_stores, num_stores, hashchain_store_capacity) =
+    let (num_value_stores, _, hashchain_store_capacity) =
         batch_metadata.get_size_parameters(queue_type)?;
-    let (batches, account_data) = ZeroCopySliceMutU64::from_bytes_at(account_data)?;
     let (value_vecs, account_data) =
         ZeroCopyVecU64::from_bytes_at_multiple(num_value_stores, account_data)?;
-    let (bloom_filter_stores, account_data) =
-        ZeroCopySliceMutU64::from_bytes_at_multiple(num_stores, account_data)?;
+    let (bloom_filter_stores, account_data) = deserialize_bloom_filter_stores(
+        (batch_metadata.bloom_filter_capacity / 8) as usize,
+        account_data,
+    );
     let (hashchain_store, _) =
         ZeroCopyVecU64::from_bytes_at_multiple(hashchain_store_capacity, account_data)?;
 
-    Ok((batches, value_vecs, bloom_filter_stores, hashchain_store))
+    Ok((value_vecs, bloom_filter_stores, hashchain_store))
 }
 
 #[allow(clippy::type_complexity)]
@@ -567,49 +556,51 @@ pub(crate) fn init_queue<'a>(
     batch_metadata: &BatchMetadata,
     queue_type: u64,
     account_data: &'a mut [u8],
-    num_iters: u64,
-    bloom_filter_capacity: u64,
-    batch_start_index: u64,
 ) -> Result<
     (
-        ZeroCopySliceMutU64<'a, Batch>,
         Vec<ZeroCopyVecU64<'a, [u8; 32]>>,
-        Vec<ZeroCopySliceMutU64<'a, u8>>,
+        Vec<&'a mut [u8]>,
         Vec<ZeroCopyVecU64<'a, [u8; 32]>>,
+        &'a mut [u8],
     ),
     BatchedMerkleTreeError,
 > {
-    let (num_value_stores, num_stores, num_hashchain_stores) =
+    let (num_value_stores, _num_stores, num_hashchain_stores) =
         batch_metadata.get_size_parameters(queue_type)?;
-    let (mut batches, account_data) =
-        ZeroCopySliceMutU64::new_at(batch_metadata.num_batches, account_data)?;
-
-    for i in 0..batch_metadata.num_batches {
-        batches[i as usize] = Batch::new(
-            num_iters,
-            bloom_filter_capacity,
-            batch_metadata.batch_size,
-            batch_metadata.zkp_batch_size,
-            batch_metadata.batch_size * i + batch_start_index,
-        );
-    }
     let (value_vecs, account_data) =
         ZeroCopyVecU64::new_at_multiple(num_value_stores, batch_metadata.batch_size, account_data)?;
-    let (bloom_filter_stores, account_data) = ZeroCopySliceMutU64::new_at_multiple(
-        num_stores,
-        batch_metadata.bloom_filter_capacity / 8,
+    let (bloom_filter_stores, account_data) = deserialize_bloom_filter_stores(
+        (batch_metadata.bloom_filter_capacity / 8) as usize,
         account_data,
-    )?;
-
-    let (hashchain_store, _) = ZeroCopyVecU64::new_at_multiple(
+    );
+    let (hashchain_store, account_data) = ZeroCopyVecU64::new_at_multiple(
         num_hashchain_stores,
         batch_metadata.get_num_zkp_batches(),
         account_data,
     )?;
-    Ok((batches, value_vecs, bloom_filter_stores, hashchain_store))
+    Ok((
+        value_vecs,
+        bloom_filter_stores,
+        hashchain_store,
+        account_data,
+    ))
 }
 
+#[inline(always)]
+fn deserialize_bloom_filter_stores(
+    bloom_filter_capacity: usize,
+    mut account_data: &mut [u8],
+) -> (Vec<&mut [u8]>, &mut [u8]) {
+    let mut bloom_filter_stores = Vec::with_capacity(2);
+    for _ in 0..2 {
+        let (slice, _bytes) = account_data.split_at_mut(bloom_filter_capacity);
+        account_data = _bytes;
+        bloom_filter_stores.push(slice);
+    }
+    (bloom_filter_stores, account_data)
+}
 
 pub fn get_output_queue_account_size_default() -> usize {
@@ -679,28 +670,12 @@ pub fn assert_queue_inited(
     ref_batch_metadata: BatchMetadata,
     queue_type: u64,
     value_vecs: &mut Vec<ZeroCopyVecU64<[u8; 32]>>,
-    bloom_filter_stores: &mut Vec<ZeroCopySliceMutU64<u8>>,
-    batches: &mut ZeroCopySliceMutU64<'_, Batch>,
     num_batches: usize,
-    num_iters: u64,
-    start_index: u64,
 ) {
     assert_eq!(
         batch_metadata, ref_batch_metadata,
         "batch_metadata mismatch"
     );
-    assert_eq!(batches.len(), num_batches, "batches mismatch");
-    for (i, batch) in batches.iter().enumerate() {
-        let ref_batch = Batch::new(
-            num_iters,
-            ref_batch_metadata.bloom_filter_capacity,
-            ref_batch_metadata.batch_size,
-            ref_batch_metadata.zkp_batch_size,
-            ref_batch_metadata.batch_size * i as u64 + start_index,
-        );
-
-        assert_eq!(batch, &ref_batch, "batch mismatch");
-    }
 
     if queue_type == QueueType::BatchedOutput as u64 {
         assert_eq!(value_vecs.capacity(), num_batches, "value_vecs mismatch");
@@ -710,33 +685,6 @@ pub fn assert_queue_inited(
         assert_eq!(value_vecs.capacity(), 0, "value_vecs mismatch");
     }
 
-    if queue_type == QueueType::BatchedOutput as u64 {
-        assert_eq!(
-            bloom_filter_stores.capacity(),
-            0,
-            "bloom_filter_stores mismatch"
-        );
-    } else {
-        assert_eq!(
-            bloom_filter_stores.capacity(),
-            num_batches,
-            "bloom_filter_stores mismatch"
-        );
-        assert_eq!(
-            bloom_filter_stores.len(),
-            num_batches,
-            "bloom_filter_stores mismatch"
-        );
-    }
-
-    for vec in
bloom_filter_stores { - assert_eq!( - vec.len() * 8, - batch_metadata.bloom_filter_capacity as usize, - "bloom_filter_capacity mismatch" - ); - } - for vec in value_vecs.iter() { assert_eq!( vec.capacity(), @@ -748,17 +696,12 @@ pub fn assert_queue_inited( } #[cfg(not(target_os = "solana"))] -pub fn assert_queue_zero_copy_inited( - account_data: &mut [u8], - ref_account: BatchedQueueMetadata, - num_iters: u64, -) { +pub fn assert_queue_zero_copy_inited(account_data: &mut [u8], ref_account: BatchedQueueMetadata) { let mut account = BatchedQueueAccount::output_from_bytes(account_data) .expect("from_bytes_unchecked_mut failed"); let num_batches = ref_account.batch_metadata.num_batches as usize; let batch_metadata = account.batch_metadata; let queue_type = account.metadata.metadata.queue_type; - let next_index = account.next_index; assert_eq!( account.metadata.metadata, ref_account.metadata, "metadata mismatch" @@ -768,10 +711,6 @@ pub fn assert_queue_zero_copy_inited( ref_account.batch_metadata, queue_type, &mut account.value_vecs, - &mut account.bloom_filter_stores, - &mut account.batches, num_batches, - num_iters, - next_index, ); } diff --git a/program-libs/batched-merkle-tree/src/rollover_address_tree.rs b/program-libs/batched-merkle-tree/src/rollover_address_tree.rs index 2ecf6e1e3c..62f268001a 100644 --- a/program-libs/batched-merkle-tree/src/rollover_address_tree.rs +++ b/program-libs/batched-merkle-tree/src/rollover_address_tree.rs @@ -65,7 +65,7 @@ fn create_batched_address_tree_init_params( input_queue_batch_size: old_merkle_tree.queue_metadata.batch_size, input_queue_zkp_batch_size: old_merkle_tree.queue_metadata.zkp_batch_size, bloom_filter_capacity: old_merkle_tree.queue_metadata.bloom_filter_capacity, - bloom_filter_num_iters: old_merkle_tree.batches[0].num_iters, + bloom_filter_num_iters: old_merkle_tree.queue_metadata.batches[0].num_iters, root_history_capacity: old_merkle_tree.root_history_capacity, network_fee, rollover_threshold: if_equals_none( @@ -91,7 +91,6 @@ pub fn assert_address_mt_roll_over( mut new_mt_account_data: Vec, new_ref_mt_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, new_mt_pubkey: Pubkey, - bloom_filter_num_iters: u64, ) { old_ref_mt_account .metadata @@ -104,6 +103,5 @@ pub fn assert_address_mt_roll_over( crate::initialize_state_tree::assert_address_mt_zero_copy_inited( &mut new_mt_account_data, new_ref_mt_account, - bloom_filter_num_iters, ); } diff --git a/program-libs/batched-merkle-tree/src/rollover_state_tree.rs b/program-libs/batched-merkle-tree/src/rollover_state_tree.rs index dafb08d62e..a56e65da58 100644 --- a/program-libs/batched-merkle-tree/src/rollover_state_tree.rs +++ b/program-libs/batched-merkle-tree/src/rollover_state_tree.rs @@ -105,7 +105,7 @@ impl From<&RolloverBatchStateTreeParams<'_>> for InitStateTreeAccountsInstructio input_queue_batch_size: params.old_merkle_tree.queue_metadata.batch_size, input_queue_zkp_batch_size: params.old_merkle_tree.queue_metadata.zkp_batch_size, bloom_filter_capacity: params.old_merkle_tree.queue_metadata.bloom_filter_capacity, - bloom_filter_num_iters: params.old_merkle_tree.batches[0].num_iters, + bloom_filter_num_iters: params.old_merkle_tree.queue_metadata.batches[0].num_iters, root_history_capacity: params.old_merkle_tree.root_history_capacity, network_fee: params.network_fee, rollover_threshold: if_equals_none( @@ -128,7 +128,7 @@ impl From<&RolloverBatchStateTreeParams<'_>> for InitStateTreeAccountsInstructio additional_bytes: params.additional_bytes, output_queue_batch_size: 
params.old_output_queue.batch_metadata.batch_size, output_queue_zkp_batch_size: params.old_output_queue.batch_metadata.zkp_batch_size, - output_queue_num_batches: params.old_output_queue.batches.len() as u64, + output_queue_num_batches: params.old_output_queue.batch_metadata.batches.len() as u64, } } } @@ -164,7 +164,6 @@ pub struct StateMtRollOverAssertParams { pub new_mt_account_data: Vec, pub old_mt_pubkey: Pubkey, pub new_mt_pubkey: Pubkey, - pub bloom_filter_num_iters: u64, pub ref_rolledover_mt: BatchedMerkleTreeMetadata, pub queue_account_data: Vec, pub ref_queue_account: BatchedQueueMetadata, @@ -183,7 +182,7 @@ pub fn assert_state_mt_roll_over(params: StateMtRollOverAssertParams) { new_mt_account_data, old_mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters, + ref_rolledover_mt, mut queue_account_data, ref_queue_account, @@ -203,7 +202,7 @@ pub fn assert_state_mt_roll_over(params: StateMtRollOverAssertParams) { .rollover_metadata .rolledover_slot = slot; - crate::queue::assert_queue_zero_copy_inited(&mut new_queue_account_data, ref_queue_account, 0); + crate::queue::assert_queue_zero_copy_inited(&mut new_queue_account_data, ref_queue_account); let zero_copy_queue = BatchedQueueAccount::output_from_bytes(&mut queue_account_data).unwrap(); assert_eq!(zero_copy_queue.metadata, ref_rolledover_queue.metadata); @@ -212,7 +211,6 @@ pub fn assert_state_mt_roll_over(params: StateMtRollOverAssertParams) { ref_mt_account, new_mt_account_data, new_mt_pubkey, - bloom_filter_num_iters, ref_rolledover_mt, old_queue_pubkey, slot, @@ -228,7 +226,6 @@ pub struct MtRollOverAssertParams { pub ref_mt_account: BatchedMerkleTreeMetadata, pub new_mt_account_data: Vec, pub new_mt_pubkey: Pubkey, - pub bloom_filter_num_iters: u64, pub ref_rolledover_mt: BatchedMerkleTreeMetadata, pub old_queue_pubkey: Pubkey, pub slot: u64, @@ -241,7 +238,6 @@ pub fn assert_mt_roll_over(params: MtRollOverAssertParams) { ref_mt_account, mut new_mt_account_data, new_mt_pubkey, - bloom_filter_num_iters, mut ref_rolledover_mt, old_queue_pubkey, slot, @@ -258,6 +254,5 @@ pub fn assert_mt_roll_over(params: MtRollOverAssertParams) { crate::initialize_state_tree::assert_state_mt_zero_copy_inited( &mut new_mt_account_data, ref_mt_account, - bloom_filter_num_iters, ); } diff --git a/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs b/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs index 02e7a52a39..d0714bd388 100644 --- a/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs +++ b/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs @@ -1,5 +1,4 @@ use light_batched_merkle_tree::{ - batch::Batch, initialize_address_tree::{ init_batched_address_merkle_tree_account, InitAddressTreeAccountsInstructionData, }, @@ -8,9 +7,7 @@ use light_batched_merkle_tree::{ merkle_tree_metadata::{BatchedMerkleTreeMetadata, CreateTreeParams}, }; use light_utils::pubkey::Pubkey; -use light_zero_copy::{ - cyclic_vec::ZeroCopyCyclicVecU64, slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64, -}; +use light_zero_copy::{cyclic_vec::ZeroCopyCyclicVecU64, vec::ZeroCopyVecU64}; use rand::{rngs::StdRng, Rng}; #[test] @@ -27,11 +24,7 @@ fn test_account_init() { init_batched_address_merkle_tree_account(owner, params, &mut mt_account_data, merkle_tree_rent) .unwrap(); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } #[test] @@ -67,7 +60,7 @@ fn 
test_rnd_account_init() { rollover_threshold: Some(rng.gen_range(0..100)), close_threshold: None, root_history_capacity: rng.gen_range(1..1000), - input_queue_num_batches: rng.gen_range(1..4), + input_queue_num_batches: 2, height: rng.gen_range(1..32), }; @@ -82,11 +75,7 @@ fn test_rnd_account_init() { { let num_zkp_batches = params.input_queue_batch_size / params.input_queue_zkp_batch_size; let num_batches = params.input_queue_num_batches as usize; - let batch_size = - ZeroCopySliceMutU64::::required_size_for_capacity(num_batches as u64); - let bloom_filter_size = ZeroCopySliceMutU64::::required_size_for_capacity( - params.bloom_filter_capacity / 8, - ) * num_batches; + let bloom_filter_size = ((params.bloom_filter_capacity / 8) * 2u64) as usize; let hash_chain_store_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(num_zkp_batches) * num_batches; @@ -96,7 +85,6 @@ fn test_rnd_account_init() { // Output queue let ref_account_size = BatchedMerkleTreeMetadata::LEN + root_history_size - + batch_size + bloom_filter_size // 2 hash chain stores + hash_chain_store_size; @@ -116,10 +104,6 @@ fn test_rnd_account_init() { let mt_params = CreateTreeParams::from_address_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(mt_params, merkle_tree_rent); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } } diff --git a/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs b/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs index b6ff2f86d3..c4d837b60c 100644 --- a/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs @@ -1,5 +1,4 @@ use light_batched_merkle_tree::{ - batch::Batch, initialize_state_tree::{ assert_state_mt_zero_copy_inited, create_output_queue_account, init_batched_state_merkle_tree_accounts, CreateOutputQueueParams, @@ -13,9 +12,7 @@ use light_batched_merkle_tree::{ }, }; use light_utils::pubkey::Pubkey; -use light_zero_copy::{ - cyclic_vec::ZeroCopyCyclicVecU64, slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64, -}; +use light_zero_copy::{cyclic_vec::ZeroCopyCyclicVecU64, vec::ZeroCopyVecU64}; use rand::{rngs::StdRng, Rng}; #[test] fn test_different_parameters() { @@ -70,16 +67,11 @@ fn test_different_parameters() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let mt_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } } @@ -123,15 +115,10 @@ fn test_account_init() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let mt_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } #[test] @@ -171,8 +158,8 @@ fn test_rnd_account_init() { rollover_threshold: Some(rng.gen_range(0..100)), 
close_threshold: None, root_history_capacity: rng.gen_range(1..1000), - input_queue_num_batches: rng.gen_range(1..4), - output_queue_num_batches: rng.gen_range(1..4), + input_queue_num_batches: 2, + output_queue_num_batches: 2, height: rng.gen_range(1..32), }; let queue_account_size = get_output_queue_account_size( @@ -185,9 +172,6 @@ fn test_rnd_account_init() { let num_batches = params.output_queue_num_batches as usize; let num_zkp_batches = params.output_queue_batch_size / params.output_queue_zkp_batch_size; - let batch_size = ZeroCopySliceMutU64::::required_size_for_capacity( - params.output_queue_num_batches, - ); let value_vec_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity( params.output_queue_batch_size, ) * num_batches; @@ -198,7 +182,6 @@ fn test_rnd_account_init() { let ref_queue_account_size = // metadata BatchedQueueMetadata::LEN - + batch_size // 2 value vecs + value_vec_size // 2 hash chain stores @@ -221,10 +204,7 @@ fn test_rnd_account_init() { { let num_zkp_batches = params.input_queue_batch_size / params.input_queue_zkp_batch_size; let num_batches = params.input_queue_num_batches; - let batch_size = ZeroCopySliceMutU64::::required_size_for_capacity(num_batches); - let bloom_filter_size = ZeroCopySliceMutU64::::required_size_for_capacity( - params.bloom_filter_capacity / 8, - ) * num_batches as usize; + let bloom_filter_size = ((params.bloom_filter_capacity / 8) * num_batches) as usize; let hash_chain_store_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(num_zkp_batches) * num_batches as usize; @@ -236,7 +216,6 @@ fn test_rnd_account_init() { // metadata BatchedMerkleTreeMetadata::LEN + root_history_size - + batch_size + bloom_filter_size // 2 hash chain stores + hash_chain_store_size; @@ -270,16 +249,11 @@ fn test_rnd_account_init() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let mt_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } } diff --git a/program-libs/batched-merkle-tree/tests/merkle_tree.rs b/program-libs/batched-merkle-tree/tests/merkle_tree.rs index c983a03472..3c17d5b5d7 100644 --- a/program-libs/batched-merkle-tree/tests/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/tests/merkle_tree.rs @@ -2,7 +2,7 @@ use std::cmp::min; use light_batched_merkle_tree::{ - batch::{Batch, BatchState}, + batch::BatchState, constants::{ ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT, @@ -36,14 +36,13 @@ use light_prover_client::{ }; use light_utils::{hashchain::create_hash_chain_from_slice, pubkey::Pubkey}; use light_verifier::CompressedProof; -use light_zero_copy::{slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64}; +use light_zero_copy::vec::ZeroCopyVecU64; use rand::{rngs::StdRng, Rng}; use serial_test::serial; #[allow(clippy::too_many_arguments)] pub fn assert_nullifier_queue_insert( pre_account: BatchedMerkleTreeMetadata, - pre_batches: ZeroCopySliceMutU64, pre_value_vecs: &mut Vec>, pre_roots: Vec<[u8; 32]>, pre_hashchains: Vec>, @@ -63,7 +62,6 @@ pub fn assert_nullifier_queue_insert( } assert_input_queue_insert( pre_account, - pre_batches, pre_value_vecs, pre_roots, pre_hashchains, @@ -80,7 +78,6 @@ pub fn 
assert_nullifier_queue_insert( #[allow(clippy::too_many_arguments)] pub fn assert_input_queue_insert( mut pre_account: BatchedMerkleTreeMetadata, - mut pre_batches: ZeroCopySliceMutU64<Batch>, pre_value_vecs: &mut Vec<ZeroCopyVecU64<[u8; 32]>>, pre_roots: Vec<[u8; 32]>, mut pre_hashchains: Vec<ZeroCopyVecU64<[u8; 32]>>, @@ -129,7 +126,11 @@ pub fn assert_input_queue_insert( let inserted_batch_index = pre_account.queue_metadata.currently_processing_batch_index as usize; - let expected_batch = pre_batches.get_mut(inserted_batch_index).unwrap(); + let expected_batch = pre_account + .queue_metadata + .batches + .get_mut(inserted_batch_index) + .unwrap(); println!( "assert input queue batch update: expected_batch: {:?}", expected_batch @@ -164,9 +165,9 @@ pub fn assert_input_queue_insert( ); // New value exists in the current batch bloom filter let mut bloom_filter = light_bloom_filter::BloomFilter::new( - merkle_tree_account.batches[inserted_batch_index].num_iters as usize, - merkle_tree_account.batches[inserted_batch_index].bloom_filter_capacity, - merkle_tree_account.bloom_filter_stores[inserted_batch_index].as_mut_slice(), + merkle_tree_account.queue_metadata.batches[inserted_batch_index].num_iters as usize, + merkle_tree_account.queue_metadata.batches[inserted_batch_index].bloom_filter_capacity, + merkle_tree_account.bloom_filter_stores[inserted_batch_index], ) .unwrap(); println!( @@ -177,16 +178,21 @@ pub fn assert_input_queue_insert( let pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap(); expected_batch.add_to_hash_chain(&leaf_hashchain_insert_values[i], pre_hashchain)?; + let num_iters = + merkle_tree_account.queue_metadata.batches[inserted_batch_index].num_iters as usize; + let bloom_filter_capacity = + merkle_tree_account.queue_metadata.batches[inserted_batch_index].bloom_filter_capacity; // New value does not exist in the other batch bloom_filters - for (i, batch) in merkle_tree_account.batches.iter_mut().enumerate() { + for (i, store) in merkle_tree_account + .bloom_filter_stores + .iter_mut() + .enumerate() + { // Skip current batch it is already checked above if i != inserted_batch_index { - let mut bloom_filter = light_bloom_filter::BloomFilter::new( - batch.num_iters as usize, - batch.bloom_filter_capacity, - merkle_tree_account.bloom_filter_stores[i].as_mut_slice(), - ) - .unwrap(); + let mut bloom_filter = + light_bloom_filter::BloomFilter::new(num_iters, bloom_filter_capacity, store) + .unwrap(); assert!(!bloom_filter.contains(insert_value)); } } @@ -195,7 +201,7 @@ pub fn assert_input_queue_insert( // update if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches() { assert_eq!( - merkle_tree_account.batches + merkle_tree_account.queue_metadata.batches [pre_account.queue_metadata.currently_processing_batch_index as usize] .get_state(), BatchState::Full @@ -204,7 +210,7 @@ pub fn assert_input_queue_insert( pre_account.queue_metadata.currently_processing_batch_index %= pre_account.queue_metadata.num_batches; assert_eq!( - merkle_tree_account.batches[inserted_batch_index], + merkle_tree_account.queue_metadata.batches[inserted_batch_index], *expected_batch ); assert_eq!( @@ -223,18 +229,18 @@ pub fn assert_input_queue_insert( "BatchedMerkleTreeMetadata changed."
); let inserted_batch_index = pre_account.queue_metadata.currently_processing_batch_index as usize; - let mut expected_batch = pre_batches[inserted_batch_index]; + let mut expected_batch = pre_account.queue_metadata.batches[inserted_batch_index]; if should_be_zeroed { expected_batch.set_bloom_filter_to_zeroed(); } assert_eq!( - merkle_tree_account.batches[inserted_batch_index], + merkle_tree_account.queue_metadata.batches[inserted_batch_index], expected_batch ); let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; assert_eq!( - merkle_tree_account.batches[other_batch], - pre_batches[other_batch] + merkle_tree_account.queue_metadata.batches[other_batch], + pre_account.queue_metadata.batches[other_batch] ); assert_eq!( merkle_tree_account.hashchain_store, *pre_hashchains, @@ -249,30 +255,26 @@ pub fn assert_input_queue_insert( /// - if batch is full after insertion advance state to ReadyToUpdateTree pub fn assert_output_queue_insert( mut pre_account: BatchedQueueMetadata, - mut pre_batches: Vec<Batch>, + // mut pre_batches: Vec<Batch>, mut pre_value_store: Vec<ZeroCopyVecU64<[u8; 32]>>, mut pre_hashchains: Vec<ZeroCopyVecU64<[u8; 32]>>, mut output_account: BatchedQueueAccount, insert_values: Vec<[u8; 32]>, ) -> Result<(), BatchedMerkleTreeError> { - for batch in output_account.batches.iter_mut() { + for batch in output_account.batch_metadata.batches.iter_mut() { println!("output_account.batch: {:?}", batch); } - for batch in pre_batches.iter() { + for batch in pre_account.batch_metadata.batches.iter() { println!("pre_batch: {:?}", batch); } for insert_value in insert_values.iter() { - // There are no bloom_filters - for store in output_account.bloom_filter_stores.iter() { - assert_eq!(store.len(), 0); - } // if the currently processing batch changed it should // increment by one and the old batch should be ready to // update let inserted_batch_index = pre_account.batch_metadata.currently_processing_batch_index as usize; - let expected_batch = &mut pre_batches[inserted_batch_index]; + let expected_batch = &mut pre_account.batch_metadata.batches[inserted_batch_index]; let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap(); let pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap(); if expected_batch.get_state() == BatchState::Inserted { @@ -295,7 +297,7 @@ pub fn assert_output_queue_insert( .contains(insert_value)); if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index() { assert!( - output_account.batches + output_account.batch_metadata.batches [pre_account.batch_metadata.currently_processing_batch_index as usize] .get_state() == BatchState::Full @@ -304,17 +306,11 @@ pub fn assert_output_queue_insert( pre_account.batch_metadata.currently_processing_batch_index %= pre_account.batch_metadata.num_batches; assert_eq!( - output_account.batches[inserted_batch_index], + output_account.batch_metadata.batches[inserted_batch_index], *expected_batch ); } } - let inserted_batch_index = pre_account.batch_metadata.currently_processing_batch_index as usize; - let expected_batch = &pre_batches[inserted_batch_index]; - assert_eq!( - output_account.batches[inserted_batch_index], - *expected_batch - ); assert_eq!( *output_account.get_metadata(), pre_account, @@ -335,7 +331,6 @@ pub fn assert_output_queue_insert( ); } } - assert_eq!(pre_batches, output_account.batches.to_vec()); assert_eq!(pre_value_store, output_account.value_vecs); Ok(()) } @@ -373,12 +368,17 @@ pub fn simulate_transaction( println!("simulate_transaction: inclusion is none"); let mut included = false; let mut leaf_index = 0;
+ let start_indices = output_account + .batch_metadata + .batches + .iter() + .map(|batch| batch.start_index) + .collect::>(); for (batch_index, value_vec) in output_account.value_vecs.iter_mut().enumerate() { for (value_index, value) in value_vec.iter_mut().enumerate() { if *value == *input { - let batch_start_index = - output_account.batches.get(batch_index).unwrap().start_index; + let batch_start_index = start_indices[batch_index]; included = true; println!("overwriting value: {:?}", value); *value = [0u8; 32]; @@ -528,7 +528,11 @@ async fn test_simulate_transactions() { panic!("Leaf not found in output queue."); } } - let batch = output_queue.batches.get(batch_index).unwrap(); + let batch = output_queue + .batch_metadata + .batches + .get(batch_index) + .unwrap(); array_indices.push(leaf_array_index); let leaf_index: u64 = batch.start_index + leaf_array_index as u64; leaf_indices.push(leaf_index); @@ -548,7 +552,7 @@ async fn test_simulate_transactions() { BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); println!( "input queue: {:?}", - merkle_tree_account.batches[0].get_num_inserted() + merkle_tree_account.queue_metadata.batches[0].get_num_inserted() ); let mut pre_mt_data = mt_account_data.clone(); @@ -557,7 +561,6 @@ async fn test_simulate_transactions() { let pre_output_account = BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); let pre_output_metadata = *pre_output_account.get_metadata(); - let pre_output_batches = pre_output_account.batches; let mut pre_output_value_stores = pre_output_account.value_vecs; let pre_output_hashchains = pre_output_account.hashchain_store; @@ -565,8 +568,6 @@ async fn test_simulate_transactions() { let pre_merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_bytes).unwrap(); let pre_mt_account = *pre_merkle_tree_account.get_metadata(); - let pre_batches = pre_merkle_tree_account.batches; - // let pre_value_store = pre_merkle_tree_account.value_vecs; let pre_roots = pre_merkle_tree_account .root_history .iter() @@ -591,7 +592,6 @@ async fn test_simulate_transactions() { println!("inputs: {:?}", inputs); assert_nullifier_queue_insert( pre_mt_account, - pre_batches, &mut pre_output_value_stores, // mut to remove values proven by index pre_roots, pre_mt_hashchains, @@ -608,7 +608,6 @@ async fn test_simulate_transactions() { if !outputs.is_empty() { assert_output_queue_insert( pre_output_metadata, - pre_output_batches.to_vec(), pre_output_value_stores, pre_output_hashchains, BatchedQueueAccount::output_from_bytes( @@ -635,12 +634,14 @@ async fn test_simulate_transactions() { let merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data).unwrap(); in_ready_for_update = merkle_tree_account + .queue_metadata .batches .iter() .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); out_ready_for_update = output_account + .batch_metadata .batches .iter() .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); @@ -663,10 +664,14 @@ async fn test_simulate_transactions() { let (input_res, new_root) = { let mut account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data).unwrap(); - println!("batches {:?}", account.batches); + println!("batches {:?}", account.queue_metadata.batches); let next_full_batch = account.get_metadata().queue_metadata.next_full_batch_index; - let batch = account.batches.get(next_full_batch as usize).unwrap(); + let 
batch = account + .queue_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); println!( "account .hashchain_store {:?}", @@ -744,6 +749,7 @@ async fn test_simulate_transactions() { .batch_metadata .next_full_batch_index; let batch = output_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -799,8 +805,8 @@ async fn test_simulate_transactions() { let old_account = BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); assert_batch_append_event_event( batch_append_event, new_root, @@ -824,8 +830,8 @@ async fn test_simulate_transactions() { } let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); println!("num_output_updates: {}", num_output_updates); println!("num_input_updates: {}", num_input_updates); println!("num_output_values: {}", num_output_values); @@ -915,7 +921,6 @@ async fn test_e2e() { let pre_output_account = BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); let pre_account = *pre_output_account.get_metadata(); - let pre_batches = pre_output_account.batches; let pre_value_store = pre_output_account.value_vecs; let pre_hashchains = pre_output_account.hashchain_store; let mut output_account = @@ -925,7 +930,6 @@ async fn test_e2e() { .unwrap(); assert_output_queue_insert( pre_account, - pre_batches.to_vec(), pre_value_store, pre_hashchains, BatchedQueueAccount::output_from_bytes( @@ -941,6 +945,7 @@ async fn test_e2e() { let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); out_ready_for_update = output_account + .batch_metadata .batches .iter() .any(|batch| batch.get_state() == BatchState::Full); @@ -957,7 +962,6 @@ async fn test_e2e() { let pre_mt_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_account_bytes).unwrap(); let pre_account = *pre_mt_account.get_metadata(); - let pre_batches = pre_mt_account.batches; let pre_hashchains = pre_mt_account.hashchain_store; let pre_roots = pre_mt_account.root_history.iter().cloned().collect(); let tx_hash = create_hash_chain_from_slice(vec![leaf].as_slice()).unwrap(); @@ -985,7 +989,6 @@ async fn test_e2e() { BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); assert_nullifier_queue_insert( pre_account, - pre_batches, &mut vec![], pre_roots, pre_hashchains, @@ -1004,6 +1007,7 @@ async fn test_e2e() { BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); in_ready_for_update = merkle_tree_account + .queue_metadata .batches .iter() .any(|batch| batch.get_state() == BatchState::Full); @@ -1042,6 +1046,7 @@ async fn test_e2e() { .batch_metadata .next_full_batch_index; let batch = output_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -1117,8 +1122,8 @@ async fn test_e2e() { let old_account = BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + 
println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); assert_merkle_tree_update( old_account, account, @@ -1135,8 +1140,8 @@ async fn test_e2e() { } let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); println!("num_output_updates: {}", num_output_updates); println!("num_input_updates: {}", num_input_updates); println!("num_output_values: {}", num_output_values); @@ -1155,7 +1160,11 @@ pub async fn perform_input_update( let mut account = BatchedMerkleTreeAccount::state_from_bytes(mt_account_data).unwrap(); let next_full_batch = account.get_metadata().queue_metadata.next_full_batch_index; - let batch = account.batches.get(next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); let leaves_hashchain = account .hashchain_store .get(next_full_batch as usize) @@ -1216,7 +1225,11 @@ pub async fn perform_address_update( let next_full_batch = account.get_metadata().queue_metadata.next_full_batch_index; let next_index = account.get_metadata().next_index; println!("next index {:?}", next_index); - let batch = account.batches.get(next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); let batch_start_index = batch.start_index; let leaves_hashchain = account .hashchain_store @@ -1264,7 +1277,11 @@ pub async fn perform_address_update( let account = BatchedMerkleTreeAccount::address_from_bytes(mt_account_data).unwrap(); { - let batch = account.batches.get(pre_next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(pre_next_full_batch as usize) + .unwrap(); if batch.get_state() == BatchState::Inserted { mock_indexer.finalize_batch_address_update(batch.batch_size as usize); } @@ -1275,137 +1292,118 @@ } fn assert_merkle_tree_update( - old_account: BatchedMerkleTreeAccount, + mut old_account: BatchedMerkleTreeAccount, account: BatchedMerkleTreeAccount, old_queue_account: Option<BatchedQueueAccount>, queue_account: Option<BatchedQueueAccount>, root: [u8; 32], ) { - let mut expected_account = *old_account.get_metadata(); - expected_account.sequence_number += 1; - let actual_account = *account.get_metadata(); - // We only have two batches.
- let previous_full_batch_index = if expected_account.queue_metadata.next_full_batch_index == 0 { - 1 + // Output queue update + if let Some(mut old_queue_account) = old_queue_account { + let queue_account = queue_account.unwrap(); + let old_full_batch_index = old_queue_account.batch_metadata.next_full_batch_index; + let old_full_batch = old_queue_account + .batch_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + println!("old full batch {:?}", old_full_batch); + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + old_account.root_history.capacity() as u32, + ) + .unwrap(); + + if old_full_batch.get_state() == BatchState::Inserted { + old_queue_account.batch_metadata.next_full_batch_index += 1; + old_queue_account.batch_metadata.next_full_batch_index %= 2; + } + assert_eq!( + queue_account.get_metadata(), + old_queue_account.get_metadata() + ); + assert_eq!(queue_account, old_queue_account); + // Only the output queue appends state + let zkp_batch_size = old_account.queue_metadata.zkp_batch_size; + old_account.next_index += zkp_batch_size; } else { - 0 - }; + // Input queue update + let old_full_batch_index = old_account.queue_metadata.next_full_batch_index; + let history_capacity = old_account.root_history.capacity(); + let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; - let (batches, mut previous_batches, expected_queue_account, mut next_full_batch_index) = - if let Some(queue_account) = queue_account.as_ref() { - let expected_queue_account = *old_queue_account.as_ref().unwrap().get_metadata(); - expected_account.next_index += queue_account.batches.get(0).unwrap().zkp_batch_size; - let next_full_batch_index = expected_queue_account.batch_metadata.next_full_batch_index; - ( - queue_account.batches.to_vec(), - old_queue_account.as_ref().unwrap().batches.to_vec(), - Some(expected_queue_account), - next_full_batch_index, - ) - } else { - let mut batches = old_account.batches.to_vec(); - println!("previous_full_batch_index: {:?}", previous_full_batch_index); - let previous_batch = batches.get_mut(previous_full_batch_index as usize).unwrap(); - println!("previous_batch state: {:?}", previous_batch.get_state()); - println!( - "previous_batch zeroed?: {:?}", - previous_batch.bloom_filter_is_zeroed() - ); - let previous_batch_is_ready = previous_batch.get_state() == BatchState::Inserted - && !previous_batch.bloom_filter_is_zeroed(); - let batch = batches - .get_mut(old_account.queue_metadata.next_full_batch_index as usize) - .unwrap(); + let old_full_batch = old_account + .queue_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); - println!("previous_batch_is_ready: {:?}", previous_batch_is_ready); - println!( - "batch.bloom_filter_is_zeroed(): {:?}", - batch.bloom_filter_is_zeroed() - ); - println!( - "batch.get_num_inserted_elements(): {:?}", - batch.get_num_inserted_elements() + batch.zkp_batch_size - ); - println!("batch.batch_size: {:?}", batch.batch_size); - println!(" batch.get_num_inserted_elements() >= batch.batch_size / 2 && previous_batch_is_ready: {:?}", batch.get_num_inserted_elements()+ batch.zkp_batch_size >= batch.batch_size / 2); - let zeroed_batch = batch.get_num_inserted_elements() + batch.zkp_batch_size - >= batch.batch_size / 2 - && previous_batch_is_ready; - let previous_batch = batches.get_mut(previous_full_batch_index as usize).unwrap(); - - if zeroed_batch { - previous_batch.set_bloom_filter_to_zeroed(); - println!("set 
bloom filter is zeroed"); - } - (account.batches.to_vec(), batches, None, 0) - }; + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + history_capacity as u32, + ) + .unwrap(); + println!( + "current batch {:?}", + old_full_batch.get_num_inserted_elements() + ); - let mut checked_one = false; + if old_full_batch.get_state() == BatchState::Inserted { + old_account.queue_metadata.next_full_batch_index += 1; + old_account.queue_metadata.next_full_batch_index %= 2; + } + let old_full_batch_index = old_account.queue_metadata.next_full_batch_index; - for (i, batch) in batches.iter().enumerate() { - let previous_batch = previous_batches.get_mut(i).unwrap(); + let old_full_batch = old_account + .queue_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + let zeroed_batch = + old_full_batch.get_num_inserted_elements() >= old_full_batch.batch_size / 2; + println!("zeroed_batch: {:?}", zeroed_batch); - let expected_sequence_number = - account.root_history.capacity() as u64 + account.get_metadata().sequence_number; - let batch_fully_inserted = batch.sequence_number == expected_sequence_number - && batch.get_state() == BatchState::Inserted; + // let current_batch = old_account.queue_metadata.get_current_batch(); - let updated_batch = previous_batch.get_first_ready_zkp_batch().is_ok() && !checked_one; + let state = old_account.queue_metadata.batches[previous_full_batch_index].get_state(); + let previous_batch = old_account + .queue_metadata + .batches + .get_mut(previous_full_batch_index) + .unwrap(); - // Assert fully inserted batch - if batch_fully_inserted { - if queue_account.is_some() { - next_full_batch_index += 1; - next_full_batch_index %= expected_queue_account.unwrap().batch_metadata.num_batches; - } else { - expected_account.queue_metadata.next_full_batch_index += 1; - expected_account.queue_metadata.next_full_batch_index %= - expected_account.queue_metadata.num_batches; + if zeroed_batch && state == BatchState::Inserted { + previous_batch.set_bloom_filter_to_zeroed(); + let sequence_number = previous_batch.sequence_number; + let overlapping_roots_exits = sequence_number > old_account.sequence_number; + if overlapping_roots_exits { + old_account.bloom_filter_stores[previous_full_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + + let mut oldest_root_index = old_account.root_history.first_index(); + + let num_remaining_roots = sequence_number - old_account.sequence_number; + for _ in 1..num_remaining_roots { + println!("zeroing out root index: {}", oldest_root_index); + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } } - assert_eq!(batch.root_index as usize, account.root_history.last_index()); - assert_eq!(batch.get_num_inserted_zkps(), 0); - assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); - assert_eq!(batch.get_num_inserted(), 0); - assert_ne!(batch.sequence_number, previous_batch.sequence_number); - assert_eq!(batch.get_current_zkp_batch_index(), 0); - assert_ne!(batch.get_state(), previous_batch.get_state()); } - // assert updated batch - else if updated_batch { - checked_one = true; - assert_eq!( - batch.get_num_inserted_zkps(), - previous_batch.get_num_inserted_zkps() + 1 - ); - assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); - - assert_eq!(batch.sequence_number, previous_batch.sequence_number); - assert_eq!(batch.root_index, 
previous_batch.root_index); - assert_eq!( - batch.get_current_zkp_batch_index(), - previous_batch.get_current_zkp_batch_index() - ); - assert_eq!(batch.get_state(), previous_batch.get_state()); - assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); - } else { - assert_eq!(*batch, *previous_batch); - } - } - if let Some(queue_account) = queue_account.as_ref() { - let mut expected_queue_account = expected_queue_account.unwrap(); - expected_queue_account.batch_metadata.next_full_batch_index = next_full_batch_index; - assert_eq!(*queue_account.get_metadata(), expected_queue_account); } - assert_eq!(actual_account, expected_account); - for (i, root) in account.root_history.iter().enumerate() { - println!("current: i {:?}", i); - println!("current: root {:?}", root); - } - for (i, root) in old_account.root_history.iter().enumerate() { - println!("old_account: i {:?}", i); - println!("old_account: root {:?}", root); - } + old_account.sequence_number += 1; + old_account.root_history.push(root); + assert_eq!(account.get_metadata(), old_account.get_metadata()); + assert_eq!(account, old_account); assert_eq!(*account.root_history.last().unwrap(), root); } @@ -1419,7 +1417,7 @@ pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] { #[tokio::test] async fn test_fill_queues_completely() { spawn_prover( - true, + false, ProverConfig { run_mode: None, circuits: vec![ @@ -1480,7 +1478,6 @@ async fn test_fill_queues_completely() { let pre_output_account = BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_account_data).unwrap(); let pre_account = *pre_output_account.get_metadata(); - let pre_batches = pre_output_account.batches.to_vec(); let pre_value_store = pre_output_account.value_vecs; let pre_hashchains = pre_output_account.hashchain_store; @@ -1492,7 +1489,6 @@ async fn test_fill_queues_completely() { .unwrap(); assert_output_queue_insert( pre_account, - pre_batches, pre_value_store, pre_hashchains, BatchedQueueAccount::output_from_bytes( @@ -1512,6 +1508,7 @@ async fn test_fill_queues_completely() { assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); output_account + .batch_metadata .batches .iter() .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); @@ -1534,6 +1531,7 @@ async fn test_fill_queues_completely() { .batch_metadata .next_full_batch_index; let batch = output_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -1601,7 +1599,6 @@ async fn test_fill_queues_completely() { let pre_merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data).unwrap(); let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_batches = pre_merkle_tree_account.batches; let pre_roots = pre_merkle_tree_account .root_history .iter() @@ -1630,7 +1627,6 @@ async fn test_fill_queues_completely() { .unwrap(); assert_nullifier_queue_insert( pre_account, - pre_batches, &mut vec![], pre_roots, pre_hashchains, @@ -1701,12 +1697,12 @@ async fn test_fill_queues_completely() { if i >= 7 { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - let batch = merkle_tree_account.batches.get(0).unwrap(); + let batch = merkle_tree_account.queue_metadata.batches.first().unwrap(); assert!(batch.bloom_filter_is_zeroed()); } else { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - let batch = merkle_tree_account.batches.get(0).unwrap(); + let batch = 
merkle_tree_account.queue_metadata.batches.first().unwrap(); assert!(!batch.bloom_filter_is_zeroed()); } println!( @@ -1732,7 +1728,12 @@ async fn test_fill_queues_completely() { { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - for (i, batch) in merkle_tree_account.batches.iter().enumerate() { + for (i, batch) in merkle_tree_account + .queue_metadata + .batches + .iter() + .enumerate() + { assert_eq!(batch.get_state(), BatchState::Inserted); if i == 0 { assert!(batch.bloom_filter_is_zeroed()); @@ -1745,7 +1746,7 @@ async fn test_fill_queues_completely() { { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - let pre_batch_zero = *merkle_tree_account.batches.get(0).unwrap(); + let pre_batch_zero = *merkle_tree_account.queue_metadata.batches.first().unwrap(); let value = &get_rnd_bytes(&mut rng); let tx_hash = &get_rnd_bytes(&mut rng); @@ -1753,7 +1754,7 @@ async fn test_fill_queues_completely() { .insert_nullifier_into_current_batch(value, 0, tx_hash) .unwrap(); { - let post_batch = *merkle_tree_account.batches.get(0).unwrap(); + let post_batch = *merkle_tree_account.queue_metadata.batches.first().unwrap(); assert_eq!(post_batch.get_state(), BatchState::Fill); assert_eq!(post_batch.get_num_inserted(), 1); let bloom_filter_store = @@ -1761,7 +1762,7 @@ async fn test_fill_queues_completely() { let mut bloom_filter = BloomFilter::new( params.bloom_filter_num_iters as usize, params.bloom_filter_capacity, - bloom_filter_store.as_mut_slice(), + bloom_filter_store, ) .unwrap(); assert!(bloom_filter.contains(value)); @@ -1851,7 +1852,6 @@ async fn test_fill_address_tree_completely() { let pre_merkle_tree_account = BatchedMerkleTreeAccount::address_from_bytes(&mut pre_account_data).unwrap(); let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_batches = pre_merkle_tree_account.batches; let pre_roots = pre_merkle_tree_account .root_history .iter() @@ -1865,7 +1865,6 @@ async fn test_fill_address_tree_completely() { .unwrap(); assert_input_queue_insert( pre_account, - pre_batches, &mut vec![], pre_roots, pre_hashchains, @@ -1929,8 +1928,8 @@ async fn test_fill_address_tree_completely() { } let merkle_tree_account = BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data).unwrap(); - let batch = merkle_tree_account.batches.get(0).unwrap(); - let batch_one = merkle_tree_account.batches.get(1).unwrap(); + let batch = merkle_tree_account.queue_metadata.batches.first().unwrap(); + let batch_one = merkle_tree_account.queue_metadata.batches.get(1).unwrap(); assert!(!batch_one.bloom_filter_is_zeroed()); if i >= 7 { @@ -1943,7 +1942,12 @@ async fn test_fill_address_tree_completely() { { let merkle_tree_account = &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data).unwrap(); - for (i, batch) in merkle_tree_account.batches.iter().enumerate() { + for (i, batch) in merkle_tree_account + .queue_metadata + .batches + .iter() + .enumerate() + { assert_eq!(batch.get_state(), BatchState::Inserted); if i == 0 { assert!(batch.bloom_filter_is_zeroed()); @@ -1956,7 +1960,7 @@ async fn test_fill_address_tree_completely() { let merkle_tree_account = &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data).unwrap(); println!("root history {:?}", merkle_tree_account.root_history); - let pre_batch_zero = *merkle_tree_account.batches.get(0).unwrap(); + let pre_batch_zero = *merkle_tree_account.queue_metadata.batches.first().unwrap(); for root in 
merkle_tree_account.root_history.iter() { println!("root {:?}", root); diff --git a/program-libs/batched-merkle-tree/tests/queue.rs b/program-libs/batched-merkle-tree/tests/queue.rs index f78e97dd28..caf905d86c 100644 --- a/program-libs/batched-merkle-tree/tests/queue.rs +++ b/program-libs/batched-merkle-tree/tests/queue.rs @@ -1,4 +1,5 @@ use light_batched_merkle_tree::{ + batch::Batch, batch_metadata::BatchMetadata, errors::BatchedMerkleTreeError, queue::{assert_queue_zero_copy_inited, BatchedQueueAccount, BatchedQueueMetadata}, @@ -34,6 +35,7 @@ pub fn get_test_account_and_account_data( next_full_batch_index: 0, bloom_filter_capacity, zkp_batch_size: 10, + batches: [Batch::default(); 2], }, ..Default::default() }; @@ -73,7 +75,7 @@ fn test_output_queue_account() { ) .unwrap(); - assert_queue_zero_copy_inited(&mut account_data, ref_account, bloom_filter_num_iters); + assert_queue_zero_copy_inited(&mut account_data, ref_account); let mut account = BatchedQueueAccount::output_from_bytes(&mut account_data).unwrap(); let value = [1u8; 32]; account.insert_into_current_batch(&value).unwrap(); diff --git a/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs b/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs index 3770572304..94e25a8d57 100644 --- a/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs +++ b/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs @@ -41,11 +41,7 @@ fn test_rollover() { let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(create_tree_params, merkle_tree_rent); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let mut new_mt_account_data = vec![0; mt_account_size]; let new_mt_pubkey = Pubkey::new_unique(); @@ -111,7 +107,6 @@ fn test_rollover() { new_mt_account_data.to_vec(), new_ref_mt_account, new_mt_pubkey, - params.bloom_filter_num_iters, ); } // 4. 
Failing: already rolled over @@ -220,11 +215,7 @@ fn test_rnd_rollover() { let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(create_tree_params, merkle_tree_rent); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let mut new_mt_data = vec![0; mt_account_size]; let new_mt_rent = merkle_tree_rent; let network_fee = params.network_fee; @@ -250,7 +241,6 @@ fn test_rnd_rollover() { new_mt_data, new_ref_mt_account, new_mt_pubkey, - params.bloom_filter_num_iters, ); } } diff --git a/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs b/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs index e9143512bc..6acefef930 100644 --- a/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs +++ b/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs @@ -69,20 +69,12 @@ fn test_rollover() { let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let total_rent = merkle_tree_rent + additional_bytes_rent + queue_rent; let output_queue_params = CreateOutputQueueParams::from(params, owner, total_rent, mt_pubkey); let ref_output_queue_account = create_output_queue_account(output_queue_params); - assert_queue_zero_copy_inited( - queue_account_data.as_mut_slice(), - ref_output_queue_account, - 0, - ); + assert_queue_zero_copy_inited(queue_account_data.as_mut_slice(), ref_output_queue_account); let mut new_mt_account_data = vec![0; mt_account_size]; let new_mt_pubkey = Pubkey::new_unique(); @@ -265,7 +257,6 @@ fn test_rollover() { new_mt_account_data: new_mt_account_data.to_vec(), old_mt_pubkey: mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_rolledover_mt, queue_account_data: queue_account_data.to_vec(), ref_queue_account: new_ref_output_queue_account, @@ -393,11 +384,7 @@ fn test_rollover() { .rollover_metadata .network_fee = 0; ref_output_queue_account.metadata.access_metadata.forester = forester; - assert_queue_zero_copy_inited( - queue_account_data.as_mut_slice(), - ref_output_queue_account, - 0, - ); + assert_queue_zero_copy_inited(queue_account_data.as_mut_slice(), ref_output_queue_account); // 8. 
Functional: rollover address tree with network fee 0 additional bytes 0 { let merkle_tree = @@ -443,7 +430,6 @@ fn test_rollover() { new_mt_account_data: new_mt_account_data.to_vec(), old_mt_pubkey: mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_rolledover_mt, queue_account_data: queue_account_data.to_vec(), ref_queue_account: new_ref_output_queue_account, @@ -553,17 +539,12 @@ fn test_rnd_rollover() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, output_queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let mut new_mt_account_data = vec![0; mt_account_size]; let new_mt_pubkey = Pubkey::new_unique(); @@ -612,7 +593,6 @@ fn test_rnd_rollover() { new_mt_account_data: new_mt_account_data.to_vec(), old_mt_pubkey: mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_rolledover_mt, queue_account_data: output_queue_account_data.to_vec(), ref_queue_account: new_ref_output_queue_account, diff --git a/program-libs/zero-copy/src/slice_mut.rs b/program-libs/zero-copy/src/slice_mut.rs index 6838825608..393b6ca4ed 100644 --- a/program-libs/zero-copy/src/slice_mut.rs +++ b/program-libs/zero-copy/src/slice_mut.rs @@ -75,10 +75,12 @@ where Ok((slices, bytes)) } + #[inline] pub fn from_bytes(bytes: &'a mut [u8]) -> Result { Ok(Self::from_bytes_at(bytes)?.0) } + #[inline] pub fn from_bytes_at( bytes: &'a mut [u8], ) -> Result<(ZeroCopySliceMut<'a, L, T, PAD>, &'a mut [u8]), ZeroCopyError> { @@ -108,6 +110,7 @@ where Ok((ZeroCopySliceMut { length, bytes }, remaining_bytes)) } + #[inline] pub fn from_bytes_at_multiple( num_slices: usize, mut bytes: &'a mut [u8], diff --git a/program-libs/zero-copy/src/vec.rs b/program-libs/zero-copy/src/vec.rs index 144a2ee9dc..33343c1765 100644 --- a/program-libs/zero-copy/src/vec.rs +++ b/program-libs/zero-copy/src/vec.rs @@ -67,10 +67,12 @@ where Ok((value_vecs, bytes)) } + #[inline] pub fn from_bytes(bytes: &'a mut [u8]) -> Result { Ok(Self::from_bytes_at(bytes)?.0) } + #[inline] pub fn from_bytes_at(bytes: &'a mut [u8]) -> Result<(Self, &'a mut [u8]), ZeroCopyError> { let (meta_data, bytes) = bytes.split_at_mut(Self::metadata_size()); let (length, _padding) = Ref::<&mut [u8], L>::from_prefix(meta_data) @@ -79,6 +81,7 @@ where Ok((Self { length, slice }, bytes)) } + #[inline] pub fn from_bytes_at_multiple( num: usize, mut bytes: &'a mut [u8], diff --git a/program-tests/account-compression-test/tests/batched_merkle_tree_test.rs b/program-tests/account-compression-test/tests/batched_merkle_tree_test.rs index 0fddfea1ec..cc6f369829 100644 --- a/program-tests/account-compression-test/tests/batched_merkle_tree_test.rs +++ b/program-tests/account-compression-test/tests/batched_merkle_tree_test.rs @@ -172,7 +172,6 @@ async fn test_batch_state_merkle_tree() { assert_state_mt_zero_copy_inited( &mut merkle_tree.account.data.as_mut_slice(), ref_mt_account, - params.bloom_filter_num_iters, ); let output_queue_params = CreateOutputQueueParams::from( params, @@ -184,7 +183,6 @@ async fn test_batch_state_merkle_tree() { assert_queue_zero_copy_inited( &mut queue.account.data.as_mut_slice(), ref_output_queue_account, - 0, ); } let mut 
mock_indexer = MockBatchedForester::<32>::default(); @@ -743,6 +741,7 @@ pub async fn create_append_batch_ix_data( .batch_metadata .next_full_batch_index; let batch = output_zero_copy_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -779,13 +778,14 @@ pub async fn create_nullify_batch_ix_data( ) -> InstructionDataBatchNullifyInputs { let zero_copy_account: BatchedMerkleTreeAccount = BatchedMerkleTreeAccount::state_from_bytes(account_data).unwrap(); - println!("batches {:?}", zero_copy_account.batches); + println!("batches {:?}", zero_copy_account.queue_metadata.batches); let next_full_batch = zero_copy_account .get_metadata() .queue_metadata .next_full_batch_index; let batch = zero_copy_account + .queue_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -876,11 +876,7 @@ async fn test_init_batch_state_merkle_trees() { BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey.into()); let mut tree_data = merkle_tree.account.data.clone(); - assert_state_mt_zero_copy_inited( - &mut tree_data.as_mut_slice(), - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut tree_data.as_mut_slice(), ref_mt_account); let output_queue_params = CreateOutputQueueParams::from( *params, owner.into(), @@ -892,7 +888,6 @@ async fn test_init_batch_state_merkle_trees() { assert_queue_zero_copy_inited( &mut queue.account.data.as_mut_slice(), ref_output_queue_account, - 0, ); } } @@ -1204,8 +1199,8 @@ pub async fn perform_rollover_batch_state_merkle_tree( let mut account = rpc.get_account(old_merkle_tree_pubkey).await?.unwrap(); let old_merkle_tree = BatchedMerkleTreeAccount::state_from_bytes(account.data.as_mut_slice()).unwrap(); - let batch_zero = &old_merkle_tree.batches[0]; - let num_batches = old_merkle_tree.batches.len(); + let num_batches = old_merkle_tree.queue_metadata.batches.len(); + let batch_zero = &old_merkle_tree.queue_metadata.batches[0]; let old_merkle_tree = old_merkle_tree.get_metadata(); let mt_account_size = get_merkle_tree_account_size( batch_zero.batch_size, @@ -1224,7 +1219,7 @@ pub async fn perform_rollover_batch_state_merkle_tree( let mut account = rpc.get_account(old_output_queue_pubkey).await?.unwrap(); let old_queue_account = BatchedQueueAccount::output_from_bytes(account.data.as_mut_slice()).unwrap(); - let batch_zero = &old_queue_account.batches[0]; + let batch_zero = &old_queue_account.batch_metadata.batches[0]; let queue_account_size = get_output_queue_account_size( batch_zero.batch_size, batch_zero.zkp_batch_size, @@ -1412,11 +1407,7 @@ async fn test_init_batch_address_merkle_trees() { let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(mt_params, mt_rent); let mut tree_data = merkle_tree.account.data.clone(); - assert_address_mt_zero_copy_inited( - &mut tree_data.as_mut_slice(), - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut tree_data.as_mut_slice(), ref_mt_account); } } pub async fn perform_init_batch_address_merkle_tree( @@ -1888,6 +1879,7 @@ pub async fn update_batch_address_tree( .next_full_batch_index; let batch = zero_copy_account + .queue_metadata .batches .get(next_full_batch as usize) .unwrap(); diff --git a/program-tests/registry-test/tests/tests.rs b/program-tests/registry-test/tests/tests.rs index 6785088f51..1339bbc18d 100644 --- a/program-tests/registry-test/tests/tests.rs +++ b/program-tests/registry-test/tests/tests.rs @@ -2040,7 +2040,7 @@ async fn test_rollover_batch_address_tree() { let mt_params = 
CreateTreeParams::from_address_ix_params(tree_params, env.group_pda.into()); let zero_copy_account = BatchedMerkleTreeMetadata::new_address_tree(mt_params, account.lamports); - assert_address_mt_zero_copy_inited(&mut account.data, zero_copy_account, 3); + assert_address_mt_zero_copy_inited(&mut account.data, zero_copy_account); // Create one address to pay for rollover fees. perform_create_pda_with_event_rnd(&mut test_indexer, &mut rpc, &env, &payer) .await diff --git a/program-tests/system-cpi-test/tests/test.rs b/program-tests/system-cpi-test/tests/test.rs index 49328bd54a..f3061192f3 100644 --- a/program-tests/system-cpi-test/tests/test.rs +++ b/program-tests/system-cpi-test/tests/test.rs @@ -757,13 +757,7 @@ async fn only_test_create_pda() { CreatePdaMode::InvalidReadOnlyMerkleTree, ) .await; - assert_rpc_error( - result, - 0, - // UtilsError::AccountNotMutable.into(), - UtilsError::InvalidDiscriminator.into(), - ) - .unwrap(); + assert_rpc_error(result, 0, UtilsError::InvalidDiscriminator.into()).unwrap(); let result = perform_create_pda_with_event( &mut test_indexer, diff --git a/program-tests/utils/src/assert_compressed_tx.rs b/program-tests/utils/src/assert_compressed_tx.rs index 3dd9222cf6..a7ebc579bc 100644 --- a/program-tests/utils/src/assert_compressed_tx.rs +++ b/program-tests/utils/src/assert_compressed_tx.rs @@ -2,7 +2,7 @@ use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; use anchor_lang::Discriminator; use forester_utils::{get_concurrent_merkle_tree, get_hash_set, AccountZeroCopy}; use light_batched_merkle_tree::{ - merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueMetadata, + batch::Batch, merkle_tree::BatchedMerkleTreeAccount, queue::BatchedQueueMetadata, }; use light_client::{ indexer::{Indexer, StateMerkleTreeAccounts}, @@ -154,14 +154,15 @@ pub async fn assert_nullifiers_exist_in_hash_sets( let mut merkle_tree = BatchedMerkleTreeAccount::state_from_bytes(&mut merkle_tree_account_data) .unwrap(); - let mut batches = merkle_tree.batches; + let mut batches = merkle_tree.queue_metadata.batches; batches.iter_mut().enumerate().any(|(i, batch)| { - batch - .check_non_inclusion( - hash, - merkle_tree.bloom_filter_stores[i].as_mut_slice(), - ) - .is_err() + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + hash, + merkle_tree.bloom_filter_stores[i], + ) + .is_err() }); } _ => { @@ -190,24 +191,26 @@ pub async fn assert_addresses_exist_in_hash_sets( let mut account_data = account.data.clone(); let mut merkle_tree = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data).unwrap(); - let mut batches = merkle_tree.batches; + let mut batches = merkle_tree.queue_metadata.batches; // Must be included in one batch batches.iter_mut().enumerate().any(|(i, batch)| { - batch - .check_non_inclusion( - address, - merkle_tree.bloom_filter_stores[i].as_mut_slice(), - ) - .is_err() + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + address, + merkle_tree.bloom_filter_stores[i], + ) + .is_err() }); // must not be included in any other batch batches.iter_mut().enumerate().any(|(i, batch)| { - batch - .check_non_inclusion( - address, - merkle_tree.bloom_filter_stores[i].as_mut_slice(), - ) - .is_ok() + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + address, + merkle_tree.bloom_filter_stores[i], + ) + .is_ok() }); } _ => { diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs index 
231ed6ac0a..dee6ab6d7d 100644 --- a/program-tests/utils/src/e2e_test_env.rs +++ b/program-tests/utils/src/e2e_test_env.rs @@ -534,6 +534,7 @@ where let next_full_batch_index = merkle_tree.queue_metadata.next_full_batch_index; let batch = merkle_tree + .queue_metadata .batches .get(next_full_batch_index as usize) .unwrap(); @@ -583,6 +584,7 @@ where let next_full_batch_index = output_queue.batch_metadata.next_full_batch_index; let batch = output_queue + .batch_metadata .batches .get(next_full_batch_index as usize) .unwrap(); diff --git a/programs/account-compression/src/instructions/append_leaves.rs b/programs/account-compression/src/instructions/append_leaves.rs index 6cde3dd29a..6fa1e76ee1 100644 --- a/programs/account-compression/src/instructions/append_leaves.rs +++ b/programs/account-compression/src/instructions/append_leaves.rs @@ -1,4 +1,8 @@ -use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey, Discriminator}; +use anchor_lang::{ + prelude::*, + solana_program::{log::sol_log_compute_units, pubkey::Pubkey}, + Discriminator, +}; use light_batched_merkle_tree::queue::{BatchedQueueAccount, BatchedQueueMetadata}; use light_hasher::Discriminator as HasherDiscriminator; @@ -188,24 +192,23 @@ fn insert_into_output_queue<'a, 'b, 'c: 'info, 'info>( batch_size: usize, leaves: &[(u8, [u8; 32])], ) -> Result { - let output_queue_zero_copy = - &mut BatchedQueueAccount::output_from_account_info(merkle_tree_acc_info) - .map_err(ProgramError::from)?; + msg!("output_queue_zero_copy"); + sol_log_compute_units(); + let output_queue = &mut BatchedQueueAccount::output_from_account_info(merkle_tree_acc_info) + .map_err(ProgramError::from)?; + sol_log_compute_units(); + sol_log_compute_units(); check_signer_is_registered_or_authority::( ctx, - output_queue_zero_copy, + output_queue, )?; for (_, leaf) in leaves { - output_queue_zero_copy + output_queue .insert_into_current_batch(leaf) .map_err(ProgramError::from)?; } - let rollover_fee = output_queue_zero_copy - .metadata - .rollover_metadata - .rollover_fee - * batch_size as u64; + let rollover_fee = output_queue.metadata.rollover_metadata.rollover_fee * batch_size as u64; Ok(rollover_fee) } diff --git a/programs/account-compression/src/instructions/migrate_state.rs b/programs/account-compression/src/instructions/migrate_state.rs index bfbf70a569..fbb8bf6453 100644 --- a/programs/account-compression/src/instructions/migrate_state.rs +++ b/programs/account-compression/src/instructions/migrate_state.rs @@ -136,6 +136,7 @@ fn migrate_state( #[cfg(test)] mod migrate_state_test { use light_batched_merkle_tree::{ + batch::Batch, batch_metadata::BatchMetadata, queue::{BatchedQueueAccount, BatchedQueueMetadata}, }; @@ -166,17 +167,21 @@ mod migrate_state_test { queue_type: QueueType::BatchedOutput as u64, associated_merkle_tree: Pubkey::new_unique().into(), }; - + let batch_size = 1000; let account = BatchedQueueMetadata { metadata, next_index: 0, batch_metadata: BatchMetadata { - batch_size: 1000, + batch_size, num_batches: 2, currently_processing_batch_index: 0, next_full_batch_index: 0, bloom_filter_capacity: 0, zkp_batch_size: 10, + batches: [ + Batch::new(0, 0, batch_size, 10, 0), + Batch::new(0, 0, batch_size, 10, batch_size), + ], }, tree_capacity: 2u64.pow(32), }; diff --git a/programs/system/src/invoke/verify_proof.rs b/programs/system/src/invoke/verify_proof.rs index c998e03abf..c295ea6c4d 100644 --- a/programs/system/src/invoke/verify_proof.rs +++ b/programs/system/src/invoke/verify_proof.rs @@ -3,7 +3,7 @@ use std::mem; use 
account_compression::{ errors::AccountCompressionErrorCode, AddressMerkleTreeAccount, StateMerkleTreeAccount, }; -use anchor_lang::{prelude::*, Discriminator}; +use anchor_lang::{prelude::*, solana_program::log::sol_log_compute_units, Discriminator}; use light_batched_merkle_tree::{ constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT}, merkle_tree::BatchedMerkleTreeAccount, @@ -196,9 +196,12 @@ fn read_root( } BatchedMerkleTreeAccount::DISCRIMINATOR => { if IS_STATE { + msg!("state_from_account_info"); + sol_log_compute_units(); let merkle_tree = BatchedMerkleTreeAccount::state_from_account_info(merkle_tree_account_info) .map_err(ProgramError::from)?; + sol_log_compute_units(); (*roots).push(merkle_tree.root_history[root_index as usize]); height = merkle_tree.height as u8; } else { diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index 0baa0bdc7b..93c8a539af 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -835,7 +835,7 @@ where BatchedMerkleTreeAccount::state_from_bytes(merkle_tree_account.data.as_mut_slice()) .unwrap(); - let batch = &merkle_tree.batches[batch_index]; + let batch = &merkle_tree.queue_metadata.batches[batch_index]; if batch.get_state() == BatchState::Inserted || batch.get_state() == BatchState::Full { let batch_size = batch.zkp_batch_size; let leaf_indices_tx_hashes = diff --git a/sdk-libs/program-test/src/test_batch_forester.rs b/sdk-libs/program-test/src/test_batch_forester.rs index 381a86f4ec..ddfa0d3509 100644 --- a/sdk-libs/program-test/src/test_batch_forester.rs +++ b/sdk-libs/program-test/src/test_batch_forester.rs @@ -120,7 +120,8 @@ pub async fn create_append_batch_ix_data( let leaves = bundle.output_queue_elements.to_vec(); - let num_inserted_zkps = output_queue.batches[full_batch_index as usize].get_num_inserted_zkps(); + let num_inserted_zkps = + output_queue.batch_metadata.batches[full_batch_index as usize].get_num_inserted_zkps(); let leaves_hashchain = output_queue.hashchain_store[full_batch_index as usize][num_inserted_zkps as usize]; let (proof, new_root) = { @@ -274,7 +275,7 @@ pub async fn get_batched_nullify_ix_data( .unwrap(); let zkp_batch_size = merkle_tree.queue_metadata.zkp_batch_size; let full_batch_index = merkle_tree.queue_metadata.next_full_batch_index; - let full_batch = &merkle_tree.batches[full_batch_index as usize]; + let full_batch = &merkle_tree.queue_metadata.batches[full_batch_index as usize]; let zkp_batch_index = full_batch.get_num_inserted_zkps(); let leaves_hashchain = merkle_tree.hashchain_store[full_batch_index as usize][zkp_batch_index as usize]; @@ -498,11 +499,7 @@ pub async fn assert_registry_created_batched_state_merkle_tree let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey.into()); - assert_state_mt_zero_copy_inited( - merkle_tree.account.data.as_mut_slice(), - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(merkle_tree.account.data.as_mut_slice(), ref_mt_account); let queue_account_size = get_output_queue_account_size( params.output_queue_batch_size, @@ -538,11 +535,7 @@ pub async fn assert_registry_created_batched_state_merkle_tree ); let ref_output_queue_account = create_output_queue_account(queue_params); - assert_queue_zero_copy_inited( - queue.account.data.as_mut_slice(), - ref_output_queue_account, - 0, // output queue doesn't have a bloom filter hence no iterations - ); 
+ assert_queue_zero_copy_inited(queue.account.data.as_mut_slice(), ref_output_queue_account); Ok(()) } #[allow(clippy::too_many_arguments)] @@ -562,8 +555,8 @@ pub async fn perform_rollover_batch_state_merkle_tree( let mut account = rpc.get_account(old_merkle_tree_pubkey).await?.unwrap(); let old_merkle_tree = BatchedMerkleTreeAccount::state_from_bytes(account.data.as_mut_slice()).unwrap(); - let batch_zero = &old_merkle_tree.batches[0]; - let num_batches = old_merkle_tree.batches.len(); + let num_batches = old_merkle_tree.queue_metadata.batches.len(); + let batch_zero = &old_merkle_tree.queue_metadata.batches[0]; let mt_account_size = get_merkle_tree_account_size( batch_zero.batch_size, batch_zero.bloom_filter_capacity, @@ -581,7 +574,7 @@ pub async fn perform_rollover_batch_state_merkle_tree( let mut account = rpc.get_account(old_output_queue_pubkey).await?.unwrap(); let old_queue_account = BatchedQueueAccount::output_from_bytes(account.data.as_mut_slice()).unwrap(); - let batch_zero = &old_queue_account.batches[0]; + let batch_zero = &old_queue_account.batch_metadata.batches[0]; let queue_account_size = get_output_queue_account_size( batch_zero.batch_size, batch_zero.zkp_batch_size, @@ -706,7 +699,6 @@ pub async fn assert_perform_state_mt_roll_over( new_mt_account_data: new_state_merkle_tree.data.to_vec(), old_mt_pubkey: old_state_merkle_tree_pubkey.into(), new_mt_pubkey: new_state_merkle_tree_pubkey.into(), - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_mt_account: new_ref_mt_account, queue_account_data: old_queue_account_data.to_vec(), ref_rolledover_queue: ref_queue_account, @@ -780,11 +772,7 @@ pub async fn assert_registry_created_batched_address_merkle_tree( let mut account = rpc.get_account(old_merkle_tree_pubkey).await?.unwrap(); let old_merkle_tree = BatchedMerkleTreeAccount::address_from_bytes(account.data.as_mut_slice()).unwrap(); - let batch_zero = &old_merkle_tree.batches[0]; - let num_batches = old_merkle_tree.batches.len(); + let num_batches = old_merkle_tree.queue_metadata.batches.len(); + let batch_zero = &old_merkle_tree.queue_metadata.batches[0]; let mt_account_size = get_merkle_tree_account_size( batch_zero.batch_size, batch_zero.bloom_filter_capacity,