From 6b30abdc94318bfd7b35ecd2be05c27fc543d7ff Mon Sep 17 00:00:00 2001 From: ananas-block Date: Sun, 19 Jan 2025 21:24:45 +0000 Subject: [PATCH] wip --- program-libs/batched-merkle-tree/src/batch.rs | 103 ++-- .../batched-merkle-tree/src/batch_metadata.rs | 82 ++- .../src/initialize_state_tree.rs | 15 +- .../batched-merkle-tree/src/merkle_tree.rs | 116 +++-- .../src/merkle_tree_metadata.rs | 10 + program-libs/batched-merkle-tree/src/queue.rs | 183 +++---- .../src/rollover_address_tree.rs | 4 +- .../src/rollover_state_tree.rs | 13 +- .../tests/initialize_address_tree.rs | 21 +- .../tests/initialize_state_tree.rs | 21 +- .../batched-merkle-tree/tests/merkle_tree.rs | 466 +++++++++++++----- .../batched-merkle-tree/tests/queue.rs | 4 +- .../tests/rollover_address_tree.rs | 14 +- .../tests/rollover_state_tree.rs | 28 +- program-tests/system-cpi-test/tests/test.rs | 1 - 15 files changed, 646 insertions(+), 435 deletions(-) diff --git a/program-libs/batched-merkle-tree/src/batch.rs b/program-libs/batched-merkle-tree/src/batch.rs index 319bf51d14..c7560912e7 100644 --- a/program-libs/batched-merkle-tree/src/batch.rs +++ b/program-libs/batched-merkle-tree/src/batch.rs @@ -1,6 +1,7 @@ +use borsh::{BorshDeserialize, BorshSerialize}; use light_bloom_filter::BloomFilter; use light_hasher::{Hasher, Poseidon}; -use light_zero_copy::{slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64}; +use light_zero_copy::vec::ZeroCopyVecU64; use solana_program::msg; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; @@ -43,13 +44,26 @@ impl From for u64 { /// - is part of a queue, by default a queue has two batches. /// - is inserted into the tree by zkp batch. #[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, Eq, KnownLayout, Immutable, IntoBytes, FromBytes)] +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + KnownLayout, + Immutable, + IntoBytes, + FromBytes, + Default, + BorshSerialize, + BorshDeserialize, +)] pub struct Batch { /// Number of inserted elements in the zkp batch. num_inserted: u64, state: u64, current_zkp_batch_index: u64, - num_inserted_zkps: u64, + pub num_inserted_zkps: u64, /// Number of iterations for the bloom_filter. pub num_iters: u64, /// Theoretical capacity of the bloom_filter. We want to make it much larger @@ -234,7 +248,7 @@ impl Batch { &mut self, bloom_filter_value: &[u8; 32], hashchain_value: &[u8; 32], - bloom_filter_stores: &mut [ZeroCopySliceMutU64], + bloom_filter_stores: &mut [&mut [u8]], hashchain_store: &mut ZeroCopyVecU64<[u8; 32]>, bloom_filter_index: usize, ) -> Result<(), BatchedMerkleTreeError> { @@ -251,13 +265,18 @@ impl Batch { BloomFilter::new( self.num_iters as usize, self.bloom_filter_capacity, - bloom_filter.as_mut_slice(), + bloom_filter, )? .insert(bloom_filter_value)?; // 3. Check that value is not in any other bloom filter. for bf_store in before.iter_mut().chain(after.iter_mut()) { - self.check_non_inclusion(bloom_filter_value, bf_store.as_mut_slice())?; + Self::check_non_inclusion( + self.num_iters as usize, + self.bloom_filter_capacity, + bloom_filter_value, + bf_store, + )?; } } Ok(()) @@ -310,12 +329,13 @@ impl Batch { /// Checks that value is not in the bloom filter. pub fn check_non_inclusion( - &self, + num_iters: usize, + bloom_filter_capacity: u64, value: &[u8; 32], store: &mut [u8], ) -> Result<(), BatchedMerkleTreeError> { - let mut bloom_filter = - BloomFilter::new(self.num_iters as usize, self.bloom_filter_capacity, store)?; + let mut bloom_filter = BloomFilter::new(num_iters, bloom_filter_capacity, store)?; + println!("Checking non inclusion"); if bloom_filter.contains(value) { return Err(BatchedMerkleTreeError::NonInclusionCheckFailed); } @@ -475,10 +495,10 @@ mod tests { fn test_insert() { // Behavior Input queue let mut batch = get_test_batch(); - let mut stores = vec![vec![0u8; 20_008]; 2]; + let mut stores = vec![vec![0u8; 20_000]; 2]; let mut bloom_filter_stores = stores .iter_mut() - .map(|store| ZeroCopySliceMutU64::new(20_000, store).unwrap()) + .map(|store| &mut store[..]) .collect::>(); let mut hashchain_store_bytes = vec![ 0u8; @@ -541,19 +561,24 @@ mod tests { let mut bloom_filter = BloomFilter { num_iters: batch.num_iters as usize, capacity: batch.bloom_filter_capacity, - store: bloom_filter_stores[processing_index].as_mut_slice(), + store: bloom_filter_stores[processing_index], }; assert!(bloom_filter.contains(&value)); let other_index = if processing_index == 0 { 1 } else { 0 }; - batch - .check_non_inclusion(&value, bloom_filter_stores[other_index].as_mut_slice()) - .unwrap(); - batch - .check_non_inclusion( - &value, - bloom_filter_stores[processing_index].as_mut_slice(), - ) - .unwrap_err(); + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[other_index], + ) + .unwrap(); + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[processing_index], + ) + .unwrap_err(); ref_batch.num_inserted += 1; if ref_batch.num_inserted == ref_batch.zkp_batch_size { @@ -611,10 +636,10 @@ mod tests { let mut batch = get_test_batch(); let value = [1u8; 32]; - let mut stores = vec![vec![0u8; 20_008]; 2]; + let mut stores = vec![vec![0u8; 20_000]; 2]; let mut bloom_filter_stores = stores .iter_mut() - .map(|store| ZeroCopySliceMutU64::new(20_000, store).unwrap()) + .map(|store| &mut store[..]) .collect::>(); let mut hashchain_store_bytes = vec![ 0u8; @@ -628,9 +653,15 @@ mod tests { ) .unwrap(); - assert!(batch - .check_non_inclusion(&value, bloom_filter_stores[processing_index].as_mut_slice()) - .is_ok()); + assert_eq!( + Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[processing_index] + ), + Ok(()) + ); let ref_batch = get_test_batch(); assert_eq!(batch, ref_batch); batch @@ -642,14 +673,22 @@ mod tests { processing_index, ) .unwrap(); - assert!(batch - .check_non_inclusion(&value, bloom_filter_stores[processing_index].as_mut_slice()) - .is_err()); + assert!(Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[processing_index] + ) + .is_err()); let other_index = if processing_index == 0 { 1 } else { 0 }; - assert!(batch - .check_non_inclusion(&value, bloom_filter_stores[other_index].as_mut_slice()) - .is_ok()); + assert!(Batch::check_non_inclusion( + batch.num_iters as usize, + batch.bloom_filter_capacity, + &value, + bloom_filter_stores[other_index] + ) + .is_ok()); } } diff --git a/program-libs/batched-merkle-tree/src/batch_metadata.rs b/program-libs/batched-merkle-tree/src/batch_metadata.rs index f72e0e4462..395f0c1426 100644 --- a/program-libs/batched-merkle-tree/src/batch_metadata.rs +++ b/program-libs/batched-merkle-tree/src/batch_metadata.rs @@ -37,6 +37,7 @@ pub struct BatchMetadata { pub currently_processing_batch_index: u64, /// Next batch to be inserted into the tree. pub next_full_batch_index: u64, + pub batches: [Batch; 2], } impl BatchMetadata { @@ -45,6 +46,14 @@ impl BatchMetadata { self.batch_size / self.zkp_batch_size } + pub fn get_current_batch(&self) -> &Batch { + &self.batches[self.currently_processing_batch_index as usize] + } + + pub fn get_current_batch_mut(&mut self) -> &mut Batch { + &mut self.batches[self.currently_processing_batch_index as usize] + } + /// Validates that the batch size is properly divisible by the ZKP batch size. fn validate_batch_sizes( batch_size: u64, @@ -68,7 +77,12 @@ impl BatchMetadata { batch_size, currently_processing_batch_index: 0, next_full_batch_index: 0, + // Output queues don't use bloom filters. bloom_filter_capacity: 0, + batches: [ + Batch::new(0, 0, batch_size, zkp_batch_size, 0), + Batch::new(0, 0, batch_size, zkp_batch_size, batch_size), + ], }) } @@ -77,6 +91,8 @@ impl BatchMetadata { bloom_filter_capacity: u64, zkp_batch_size: u64, num_batches: u64, + num_iters: u64, + start_index: u64, ) -> Result { Self::validate_batch_sizes(batch_size, zkp_batch_size)?; @@ -87,6 +103,22 @@ impl BatchMetadata { currently_processing_batch_index: 0, next_full_batch_index: 0, bloom_filter_capacity, + batches: [ + Batch::new( + num_iters, + bloom_filter_capacity, + batch_size, + zkp_batch_size, + start_index, + ), + Batch::new( + num_iters, + bloom_filter_capacity, + batch_size, + zkp_batch_size, + batch_size + start_index, + ), + ], }) } @@ -98,7 +130,8 @@ impl BatchMetadata { } /// Increment the currently_processing_batch_index if current state is BatchState::Full. - pub fn increment_currently_processing_batch_index_if_full(&mut self, state: BatchState) { + pub fn increment_currently_processing_batch_index_if_full(&mut self) { + let state = self.get_current_batch().get_state(); if state == BatchState::Full { self.currently_processing_batch_index = (self.currently_processing_batch_index + 1) % self.num_batches; @@ -151,19 +184,18 @@ impl BatchMetadata { } else { BatchedQueueMetadata::LEN }; - let batches_size = - ZeroCopySliceMutU64::::required_size_for_capacity(self.num_batches); + // let batches_size = + // ZeroCopySliceMutU64::::required_size_for_capacity(self.num_batches); let value_vecs_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(self.batch_size) * num_value_vec; // Bloomfilter capacity is in bits. let bloom_filter_stores_size = - ZeroCopySliceMutU64::::required_size_for_capacity(self.bloom_filter_capacity / 8) - * num_bloom_filter_stores; + (self.bloom_filter_capacity / 8) as usize * num_bloom_filter_stores; let hashchain_store_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(self.get_num_zkp_batches()) * num_hashchain_store; let size = account_size - + batches_size + // + batches_size + value_vecs_size + bloom_filter_stores_size + hashchain_store_size; @@ -173,7 +205,7 @@ impl BatchMetadata { #[test] fn test_increment_next_full_batch_index_if_inserted() { - let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2).unwrap(); + let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2, 3, 0).unwrap(); assert_eq!(metadata.next_full_batch_index, 0); // increment next full batch index metadata.increment_next_full_batch_index_if_inserted(BatchState::Inserted); @@ -188,30 +220,30 @@ fn test_increment_next_full_batch_index_if_inserted() { assert_eq!(metadata.next_full_batch_index, 0); } -#[test] -fn test_increment_currently_processing_batch_index_if_full() { - let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2).unwrap(); - assert_eq!(metadata.currently_processing_batch_index, 0); - // increment currently_processing_batch_index - metadata.increment_currently_processing_batch_index_if_full(BatchState::Full); - assert_eq!(metadata.currently_processing_batch_index, 1); - // increment currently_processing_batch_index - metadata.increment_currently_processing_batch_index_if_full(BatchState::Full); - assert_eq!(metadata.currently_processing_batch_index, 0); - // try incrementing next full batch index with state not full - metadata.increment_currently_processing_batch_index_if_full(BatchState::Fill); - assert_eq!(metadata.currently_processing_batch_index, 0); - metadata.increment_currently_processing_batch_index_if_full(BatchState::Inserted); - assert_eq!(metadata.currently_processing_batch_index, 0); -} +// #[test] +// fn test_increment_currently_processing_batch_index_if_full() { +// let mut metadata = BatchMetadata::new_input_queue(10, 10, 10, 2).unwrap(); +// assert_eq!(metadata.currently_processing_batch_index, 0); +// // increment currently_processing_batch_index +// metadata.increment_currently_processing_batch_index_if_full(BatchState::Full); +// assert_eq!(metadata.currently_processing_batch_index, 1); +// // increment currently_processing_batch_index +// metadata.increment_currently_processing_batch_index_if_full(BatchState::Full); +// assert_eq!(metadata.currently_processing_batch_index, 0); +// // try incrementing next full batch index with state not full +// metadata.increment_currently_processing_batch_index_if_full(BatchState::Fill); +// assert_eq!(metadata.currently_processing_batch_index, 0); +// metadata.increment_currently_processing_batch_index_if_full(BatchState::Inserted); +// assert_eq!(metadata.currently_processing_batch_index, 0); +// } #[test] fn test_batch_size_validation() { // Test invalid batch size - assert!(BatchMetadata::new_input_queue(10, 10, 3, 2).is_err()); + assert!(BatchMetadata::new_input_queue(10, 10, 3, 2, 3, 0).is_err()); assert!(BatchMetadata::new_output_queue(10, 3, 2).is_err()); // Test valid batch size - assert!(BatchMetadata::new_input_queue(9, 10, 3, 2).is_ok()); + assert!(BatchMetadata::new_input_queue(9, 10, 3, 2, 3, 0).is_ok()); assert!(BatchMetadata::new_output_queue(9, 3, 2).is_ok()); } diff --git a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs index 8f4accca38..f767a39984 100644 --- a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs @@ -11,6 +11,7 @@ use light_utils::{ use solana_program::{account_info::AccountInfo, msg}; use crate::{ + batch::Batch, batch_metadata::BatchMetadata, constants::{ DEFAULT_BATCH_SIZE, DEFAULT_BATCH_STATE_TREE_HEIGHT, DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, @@ -312,14 +313,12 @@ pub fn get_state_merkle_tree_account_size_from_params( pub fn assert_state_mt_zero_copy_inited( account_data: &mut [u8], ref_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, - num_iters: u64, ) { let account = BatchedMerkleTreeAccount::state_from_bytes(account_data) .expect("from_bytes_unchecked_mut failed"); _assert_mt_zero_copy_inited::<{ crate::constants::BATCHED_STATE_TREE_TYPE }>( account, ref_account, - num_iters, TreeType::BatchedState as u64, ); } @@ -328,7 +327,6 @@ pub fn assert_state_mt_zero_copy_inited( pub fn assert_address_mt_zero_copy_inited( account_data: &mut [u8], ref_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, - num_iters: u64, ) { use crate::{constants::BATCHED_ADDRESS_TREE_TYPE, merkle_tree::BatchedMerkleTreeAccount}; @@ -337,7 +335,6 @@ pub fn assert_address_mt_zero_copy_inited( _assert_mt_zero_copy_inited::( account, ref_account, - num_iters, TreeType::Address as u64, ); } @@ -346,7 +343,6 @@ pub fn assert_address_mt_zero_copy_inited( fn _assert_mt_zero_copy_inited( mut account: BatchedMerkleTreeAccount, ref_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, - num_iters: u64, tree_type: u64, ) { use light_hasher::Hasher; @@ -354,7 +350,6 @@ fn _assert_mt_zero_copy_inited( let queue = account.queue_metadata; let ref_queue = ref_account.queue_metadata; let num_batches = ref_queue.num_batches as usize; - let mut next_index = account.next_index; assert_eq!(*account, ref_account, "metadata mismatch"); assert_eq!( @@ -382,10 +377,6 @@ fn _assert_mt_zero_copy_inited( "hashchain_store mismatch" ); - if tree_type == TreeType::BatchedAddress as u64 { - next_index = 2; - } - let queue_type = if tree_type == TreeType::BatchedState as u64 { QueueType::BatchedInput as u64 } else { @@ -396,11 +387,7 @@ fn _assert_mt_zero_copy_inited( ref_queue, queue_type, &mut account.value_vecs, - &mut account.bloom_filter_stores, - &mut account.batches, num_batches, - num_iters, - next_index, ); } diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs index 99da3fb078..964f2774a3 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs @@ -85,9 +85,9 @@ pub struct InstructionDataBatchAppendInputs { pub struct BatchedMerkleTreeAccount<'a> { metadata: Ref<&'a mut [u8], BatchedMerkleTreeMetadata>, pub root_history: ZeroCopyCyclicVecU64<'a, [u8; 32]>, - pub batches: ZeroCopySliceMutU64<'a, Batch>, + // pub batches: ZeroCopySliceMutU64<'a, Batch>, pub value_vecs: Vec>, - pub bloom_filter_stores: Vec>, + pub bloom_filter_stores: Vec<&'a mut [u8]>, pub hashchain_store: Vec>, } @@ -200,7 +200,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { } let (root_history, account_data) = ZeroCopyCyclicVecU64::from_bytes_at(account_data)?; - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = input_queue_from_bytes( + let (value_vecs, bloom_filter_stores, hashchain_store) = input_queue_from_bytes( &metadata.queue_metadata, account_data, QueueType::BatchedInput as u64, @@ -209,7 +209,6 @@ impl<'a> BatchedMerkleTreeAccount<'a> { Ok(BatchedMerkleTreeAccount { metadata, root_history, - batches, value_vecs, bloom_filter_stores, hashchain_store, @@ -246,6 +245,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { input_queue_batch_size, input_queue_zkp_batch_size, )?; + account_metadata.queue_metadata.bloom_filter_capacity = bloom_filter_capacity; if account_data_len != account_metadata.get_account_size()? { msg!("merkle_tree_metadata: {:?}", account_metadata); @@ -277,8 +277,24 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // The initialized indexed Merkle tree contains two elements. account_metadata.next_index = 2; } + let next_index = account_metadata.next_index; + println!("next_index: {}", next_index); + for (i, batches) in account_metadata + .queue_metadata + .batches + .iter_mut() + .enumerate() + { + *batches = Batch::new( + num_iters, + bloom_filter_capacity, + input_queue_batch_size, + input_queue_zkp_batch_size, + input_queue_batch_size * (i as u64) + next_index, + ); + } - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue( + let (value_vecs, bloom_filter_stores, hashchain_store, _) = init_queue( &account_metadata.queue_metadata, QueueType::BatchedInput as u64, account_data, @@ -289,7 +305,6 @@ impl<'a> BatchedMerkleTreeAccount<'a> { Ok(BatchedMerkleTreeAccount { metadata: account_metadata, root_history, - batches, value_vecs, bloom_filter_stores, hashchain_store, @@ -343,8 +358,8 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let new_root = instruction_data.new_root; let circuit_batch_size = queue_account.batch_metadata.zkp_batch_size; let start_index = self.next_index; - let full_batch = &mut queue_account.batches[full_batch_index]; - let num_zkps = full_batch.get_first_ready_zkp_batch()?; + let num_zkps = + queue_account.batch_metadata.batches[full_batch_index].get_first_ready_zkp_batch()?; // 1. Create public inputs hash. let public_input_hash = { @@ -376,12 +391,15 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // Update metadata and batch. { + println!("pre mark_as_inserted_in_merkle_tree -------------------------------"); // 3. Mark zkp batch as inserted in the merkle tree. - let full_batch_state = full_batch.mark_as_inserted_in_merkle_tree( - self.metadata.sequence_number, - root_index, - self.root_history_capacity, - )?; + let full_batch_state = queue_account.batch_metadata.batches[full_batch_index] + .mark_as_inserted_in_merkle_tree( + self.metadata.sequence_number, + root_index, + self.root_history_capacity, + )?; + println!("full_batch_state: {:?}", full_batch_state); // 4. Increment next full batch index if inserted. queue_account .batch_metadata @@ -445,7 +463,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { id: [u8; 32], ) -> Result { let full_batch_index = self.queue_metadata.next_full_batch_index as usize; - let num_zkps = self.batches[full_batch_index].get_first_ready_zkp_batch()?; + let num_zkps = self.queue_metadata.batches[full_batch_index].get_first_ready_zkp_batch()?; let new_root = instruction_data.new_root; let circuit_batch_size = self.queue_metadata.zkp_batch_size; @@ -488,11 +506,12 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let root_history_capacity = self.root_history_capacity; let sequence_number = self.sequence_number; // 3. Mark batch as inserted in the merkle tree. - let full_batch_state = self.batches[full_batch_index].mark_as_inserted_in_merkle_tree( - sequence_number, - root_index, - root_history_capacity, - )?; + let full_batch_state = self.queue_metadata.batches[full_batch_index] + .mark_as_inserted_in_merkle_tree( + sequence_number, + root_index, + root_history_capacity, + )?; // 4. Zero out previous batch bloom filter // if current batch is 50% inserted. @@ -619,7 +638,6 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let (root_index, sequence_number) = insert_into_current_batch( QueueType::BatchedInput as u64, &mut self.metadata.queue_metadata, - &mut self.batches, &mut self.value_vecs, &mut self.bloom_filter_stores, &mut self.hashchain_store, @@ -749,13 +767,9 @@ impl<'a> BatchedMerkleTreeAccount<'a> { } else { previous_full_batch_index }; - let current_batch_is_half_full = { - let num_inserted_elements = self - .batches - .get(current_batch) - .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)? - .get_num_inserted_elements(); + let num_inserted_elements = + self.queue_metadata.batches[current_batch].get_num_inserted_elements(); // Keep for finegrained unit test println!("current_batch: {}", current_batch); println!("previous_full_batch_index: {}", previous_full_batch_index); @@ -768,13 +782,20 @@ impl<'a> BatchedMerkleTreeAccount<'a> { ); num_inserted_elements >= batch_size / 2 }; + println!("current_batch is half full: {}", current_batch_is_half_full); let previous_full_batch = self + .queue_metadata .batches .get_mut(previous_full_batch_index) .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; let batch_is_inserted = previous_full_batch.get_state() == BatchState::Inserted; + println!("previous batch is inserted: {}", batch_is_inserted); + println!( + "previous batch bloom filter is zeroed: {}", + previous_full_batch.bloom_filter_is_zeroed() + ); let previous_batch_is_ready = batch_is_inserted && !previous_full_batch.bloom_filter_is_zeroed(); @@ -783,22 +804,23 @@ impl<'a> BatchedMerkleTreeAccount<'a> { println!("Wiping bloom filter of previous batch"); println!("current_batch: {}", current_batch); println!("previous_full_batch_index: {}", previous_full_batch_index); + // 3.2 Mark bloom filter zeroed. + previous_full_batch.set_bloom_filter_to_zeroed(); + let seq = previous_full_batch.sequence_number; + let root_index = previous_full_batch.root_index; // 3.1 Zero out bloom filter. { let bloom_filter = self .bloom_filter_stores .get_mut(previous_full_batch_index) .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; - bloom_filter.as_mut_slice().iter_mut().for_each(|x| *x = 0); + bloom_filter.iter_mut().for_each(|x| *x = 0); } - // 3.2 Mark bloom filter zeroed. - previous_full_batch.set_bloom_filter_to_zeroed(); + // 3.3 Zero out roots if a root exists in root history // which allows to prove inclusion of a value // that was inserted into the bloom filter just zeroed out. { - let seq = previous_full_batch.sequence_number; - let root_index = previous_full_batch.root_index; self.zero_out_roots(seq, root_index); } } @@ -833,13 +855,24 @@ impl<'a> BatchedMerkleTreeAccount<'a> { &mut self, value: &[u8; 32], ) -> Result<(), BatchedMerkleTreeError> { - let num_bloom_filters = self.bloom_filter_stores.len(); - for i in 0..num_bloom_filters { - let bloom_filter_store = self.bloom_filter_stores[i].as_mut_slice(); - let batch = &self.batches[i]; - if !batch.bloom_filter_is_zeroed() { - batch.check_non_inclusion(value, bloom_filter_store)?; - } + // for (batch, bloom_filter_store) in self + // .queue_metadata + // .batches + // .iter() + // .zip(self.bloom_filter_stores.iter_mut()) + // { + // if !batch.bloom_filter_is_zeroed() { + // batch.check_non_inclusion(value, *bloom_filter_store)?; + // } + // } + + for i in 0..2 { + Batch::check_non_inclusion( + self.queue_metadata.batches[i].num_iters as usize, + self.queue_metadata.batches[i].bloom_filter_capacity, + value, + self.bloom_filter_stores[i], + )?; } Ok(()) } @@ -909,7 +942,11 @@ pub fn assert_nullify_event( mt_pubkey: Pubkey, ) { let batch_index = old_account.queue_metadata.next_full_batch_index; - let batch = old_account.batches.get(batch_index as usize).unwrap(); + let batch = old_account + .queue_metadata + .batches + .get(batch_index as usize) + .unwrap(); let ref_event = BatchNullifyEvent { id: mt_pubkey.to_bytes(), batch_index, @@ -933,6 +970,7 @@ pub fn assert_batch_append_event_event( .batch_metadata .next_full_batch_index; let batch = old_output_queue_account + .batch_metadata .batches .get(batch_index as usize) .unwrap(); diff --git a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs index 08429a2fb0..9e0af07b82 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs @@ -120,6 +120,7 @@ impl BatchedMerkleTreeMetadata { root_history_capacity, height, num_batches, + num_iters, } = params; Self { metadata: MerkleTreeMetadata { @@ -145,6 +146,12 @@ impl BatchedMerkleTreeMetadata { bloom_filter_capacity, zkp_batch_size, num_batches, + num_iters, + if tree_type == TreeType::BatchedAddress { + 2 + } else { + 0 + }, ) .unwrap(), capacity: 2u64.pow(height), @@ -166,6 +173,7 @@ pub struct CreateTreeParams { pub root_history_capacity: u32, pub height: u32, pub num_batches: u64, + pub num_iters: u64, } impl CreateTreeParams { pub fn from_state_ix_params(data: InitStateTreeAccountsInstructionData, owner: Pubkey) -> Self { @@ -182,6 +190,7 @@ impl CreateTreeParams { root_history_capacity: data.root_history_capacity, height: data.height, num_batches: data.input_queue_num_batches, + num_iters: data.bloom_filter_num_iters, } } @@ -202,6 +211,7 @@ impl CreateTreeParams { root_history_capacity: data.root_history_capacity, height: data.height, num_batches: data.input_queue_num_batches, + num_iters: data.bloom_filter_num_iters, } } } diff --git a/program-libs/batched-merkle-tree/src/queue.rs b/program-libs/batched-merkle-tree/src/queue.rs index fcaea2091f..6f3e14b9fd 100644 --- a/program-libs/batched-merkle-tree/src/queue.rs +++ b/program-libs/batched-merkle-tree/src/queue.rs @@ -10,7 +10,7 @@ use light_utils::{ account::{check_account_info, check_discriminator, set_discriminator, DISCRIMINATOR_LEN}, pubkey::Pubkey, }; -use light_zero_copy::{errors::ZeroCopyError, slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64}; +use light_zero_copy::{errors::ZeroCopyError, vec::ZeroCopyVecU64}; use solana_program::{account_info::AccountInfo, msg}; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Ref}; @@ -63,11 +63,21 @@ impl BatchedQueueMetadata { batch_size: u64, zkp_batch_size: u64, bloom_filter_capacity: u64, + num_iters: u64, ) -> Result<(), BatchedMerkleTreeError> { self.metadata = meta_data; self.batch_metadata .init(num_batches, batch_size, zkp_batch_size)?; self.batch_metadata.bloom_filter_capacity = bloom_filter_capacity; + for (i, batches) in self.batch_metadata.batches.iter_mut().enumerate() { + *batches = Batch::new( + num_iters, + bloom_filter_capacity, + batch_size, + zkp_batch_size, + batch_size * (i as u64), + ); + } Ok(()) } } @@ -117,9 +127,7 @@ impl BatchedQueueMetadata { #[derive(Debug, PartialEq)] pub struct BatchedQueueAccount<'a> { metadata: Ref<&'a mut [u8], BatchedQueueMetadata>, - pub batches: ZeroCopySliceMutU64<'a, Batch>, pub value_vecs: Vec>, - pub bloom_filter_stores: Vec>, pub hashchain_store: Vec>, } @@ -179,20 +187,14 @@ impl<'a> BatchedQueueAccount<'a> { if metadata.metadata.queue_type != QUEUE_TYPE { return Err(MerkleTreeMetadataError::InvalidQueueType.into()); } - let (num_value_stores, num_stores, num_hashchain_stores) = + let (num_value_stores, _num_stores, num_hashchain_stores) = metadata.get_size_parameters()?; - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = output_queue_from_bytes( - num_value_stores, - num_stores, - num_hashchain_stores, - account_data, - )?; + let (value_vecs, hashchain_store) = + output_queue_from_bytes(num_value_stores, num_hashchain_stores, account_data)?; Ok(BatchedQueueAccount { metadata, - batches, value_vecs, - bloom_filter_stores, hashchain_store, }) } @@ -220,7 +222,9 @@ impl<'a> BatchedQueueAccount<'a> { output_queue_batch_size, output_queue_zkp_batch_size, bloom_filter_capacity, + num_iters, )?; + if account_data_len != account_metadata .batch_metadata @@ -236,7 +240,7 @@ impl<'a> BatchedQueueAccount<'a> { return Err(ZeroCopyError::InvalidAccountSize.into()); } - let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue( + let (value_vecs, _bloom_filter_stores, hashchain_store, _) = init_queue( &account_metadata.batch_metadata, account_metadata.metadata.queue_type, account_data, @@ -246,9 +250,7 @@ impl<'a> BatchedQueueAccount<'a> { )?; Ok(BatchedQueueAccount { metadata: account_metadata, - batches, value_vecs, - bloom_filter_stores, hashchain_store, }) } @@ -266,9 +268,8 @@ impl<'a> BatchedQueueAccount<'a> { insert_into_current_batch( self.metadata.metadata.queue_type, &mut self.metadata.batch_metadata, - &mut self.batches, &mut self.value_vecs, - self.bloom_filter_stores.as_mut_slice(), + &mut [], &mut self.hashchain_store, hash_chain_value, None, @@ -293,7 +294,7 @@ impl<'a> BatchedQueueAccount<'a> { leaf_index: u64, hash_chain_value: &[u8; 32], ) -> Result { - for (batch_index, batch) in self.batches.iter().enumerate() { + for (batch_index, batch) in self.batch_metadata.batches.iter().enumerate() { if batch.leaf_index_could_exist_in_batch(leaf_index)? { let index = batch.get_value_index_in_batch(leaf_index)?; let element = self.value_vecs[batch_index] @@ -319,7 +320,7 @@ impl<'a> BatchedQueueAccount<'a> { &mut self, leaf_index: u64, ) -> Result<(), BatchedMerkleTreeError> { - for batch in self.batches.iter() { + for batch in self.batch_metadata.batches.iter() { let res = batch.leaf_index_could_exist_in_batch(leaf_index)?; if res { return Ok(()); @@ -336,7 +337,7 @@ impl<'a> BatchedQueueAccount<'a> { leaf_index: u64, hash_chain_value: &[u8; 32], ) -> Result<(), BatchedMerkleTreeError> { - for (batch_index, batch) in self.batches.iter().enumerate() { + for (batch_index, batch) in self.batch_metadata.batches.iter().enumerate() { if batch.leaf_index_could_exist_in_batch(leaf_index)? { let index = batch.get_value_index_in_batch(leaf_index)?; let element = self.value_vecs[batch_index] @@ -365,7 +366,7 @@ impl<'a> BatchedQueueAccount<'a> { /// Returns the number of elements inserted in the current batch. pub fn get_num_inserted_in_current_batch(&self) -> u64 { let next_full_batch = self.batch_metadata.currently_processing_batch_index as usize; - self.batches[next_full_batch].get_num_inserted_elements() + self.batch_metadata.batches[next_full_batch].get_num_inserted_elements() } /// Returns true if the pubkey is the associated Merkle tree of the queue. @@ -423,9 +424,8 @@ impl DerefMut for BatchedQueueAccount<'_> { pub(crate) fn insert_into_current_batch( queue_type: u64, batch_metadata: &mut BatchMetadata, - batches: &mut ZeroCopySliceMutU64, value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], - bloom_filter_stores: &mut [ZeroCopySliceMutU64], + bloom_filter_stores: &mut [&mut [u8]], hashchain_store: &mut [ZeroCopyVecU64<[u8; 32]>], hash_chain_value: &[u8; 32], bloom_filter_value: Option<&[u8; 32]>, @@ -436,9 +436,7 @@ pub(crate) fn insert_into_current_batch( let batch_index = batch_metadata.currently_processing_batch_index as usize; let mut value_store = value_vecs.get_mut(batch_index); let mut hashchain_store = hashchain_store.get_mut(batch_index); - let current_batch = batches - .get_mut(batch_index) - .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; + let current_batch = batch_metadata.get_current_batch_mut(); // 1. Check that the current batch is ready. // 1.1. If the current batch is inserted, clear the batch. { @@ -477,7 +475,7 @@ pub(crate) fn insert_into_current_batch( } } else { // We expect to insert into the current batch. - for batch in batches.iter_mut() { + for batch in batch_metadata.batches.iter() { msg!("batch {:?}", batch); } return Err(BatchedMerkleTreeError::BatchNotReady); @@ -503,7 +501,7 @@ pub(crate) fn insert_into_current_batch( }?; // 3. If batch is full, increment currently_processing_batch_index. - batch_metadata.increment_currently_processing_batch_index_if_full(current_batch.get_state()); + batch_metadata.increment_currently_processing_batch_index_if_full(); Ok((root_index, sequence_number)) } @@ -511,26 +509,20 @@ pub(crate) fn insert_into_current_batch( #[allow(clippy::type_complexity)] pub(crate) fn output_queue_from_bytes( num_value_stores: usize, - num_stores: usize, num_hashchain_stores: usize, account_data: &mut [u8], ) -> Result< ( - ZeroCopySliceMutU64<'_, Batch>, Vec>, - Vec>, Vec>, ), BatchedMerkleTreeError, > { - let (batches, account_data) = ZeroCopySliceMutU64::from_bytes_at(account_data)?; let (value_vecs, account_data) = ZeroCopyVecU64::from_bytes_at_multiple(num_value_stores, account_data)?; - let (bloom_filter_stores, account_data) = - ZeroCopySliceMutU64::from_bytes_at_multiple(num_stores, account_data)?; let (hashchain_store, _) = ZeroCopyVecU64::from_bytes_at_multiple(num_hashchain_stores, account_data)?; - Ok((batches, value_vecs, bloom_filter_stores, hashchain_store)) + Ok((value_vecs, hashchain_store)) } #[allow(clippy::type_complexity)] @@ -540,26 +532,26 @@ pub(crate) fn input_queue_from_bytes<'a>( queue_type: u64, ) -> Result< ( - ZeroCopySliceMutU64<'a, Batch>, Vec>, - Vec>, + Vec<&'a mut [u8]>, Vec>, ), BatchedMerkleTreeError, > { - let (num_value_stores, num_stores, hashchain_store_capacity) = + let (num_value_stores, _, hashchain_store_capacity) = batch_metadata.get_size_parameters(queue_type)?; - let (batches, account_data) = ZeroCopySliceMutU64::from_bytes_at(account_data)?; let (value_vecs, account_data) = ZeroCopyVecU64::from_bytes_at_multiple(num_value_stores, account_data)?; - let (bloom_filter_stores, account_data) = - ZeroCopySliceMutU64::from_bytes_at_multiple(num_stores, account_data)?; + let (bloom_filter_stores, account_data) = fun_name( + (batch_metadata.bloom_filter_capacity / 8) as usize, + account_data, + ); let (hashchain_store, _) = ZeroCopyVecU64::from_bytes_at_multiple(hashchain_store_capacity, account_data)?; - Ok((batches, value_vecs, bloom_filter_stores, hashchain_store)) + Ok((value_vecs, bloom_filter_stores, hashchain_store)) } #[allow(clippy::type_complexity)] @@ -572,44 +564,53 @@ pub(crate) fn init_queue<'a>( batch_start_index: u64, ) -> Result< ( - ZeroCopySliceMutU64<'a, Batch>, Vec>, - Vec>, + Vec<&'a mut [u8]>, Vec>, + &'a mut [u8], ), BatchedMerkleTreeError, > { - let (num_value_stores, num_stores, num_hashchain_stores) = + let (num_value_stores, _num_stores, num_hashchain_stores) = batch_metadata.get_size_parameters(queue_type)?; - let (mut batches, account_data) = - ZeroCopySliceMutU64::new_at(batch_metadata.num_batches, account_data)?; - - for i in 0..batch_metadata.num_batches { - batches[i as usize] = Batch::new( - num_iters, - bloom_filter_capacity, - batch_metadata.batch_size, - batch_metadata.zkp_batch_size, - batch_metadata.batch_size * i + batch_start_index, - ); - } let (value_vecs, account_data) = ZeroCopyVecU64::new_at_multiple(num_value_stores, batch_metadata.batch_size, account_data)?; - let (bloom_filter_stores, account_data) = ZeroCopySliceMutU64::new_at_multiple( - num_stores, - batch_metadata.bloom_filter_capacity / 8, + // let (bloom_filter_stores, account_data) = ZeroCopySliceMutU64::new_at_multiple( + // num_stores, + // batch_metadata.bloom_filter_capacity / 8, + // account_data, + // )?; + let (bloom_filter_stores, account_data) = fun_name( + (batch_metadata.bloom_filter_capacity / 8) as usize, account_data, - )?; - - let (hashchain_store, _) = ZeroCopyVecU64::new_at_multiple( + ); + let (hashchain_store, account_data) = ZeroCopyVecU64::new_at_multiple( num_hashchain_stores, batch_metadata.get_num_zkp_batches(), account_data, )?; - Ok((batches, value_vecs, bloom_filter_stores, hashchain_store)) + Ok(( + value_vecs, + bloom_filter_stores, + hashchain_store, + account_data, + )) +} + +fn fun_name( + bloom_filter_capacity: usize, + mut account_data: &mut [u8], +) -> (Vec<&mut [u8]>, &mut [u8]) { + let mut bloom_filter_stores = Vec::with_capacity(2); + for _ in 0..2 { + let (slice, _bytes) = account_data.split_at_mut(bloom_filter_capacity); + account_data = _bytes; + bloom_filter_stores.push(slice); + } + (bloom_filter_stores, account_data) } pub fn get_output_queue_account_size_default() -> usize { @@ -679,28 +680,12 @@ pub fn assert_queue_inited( ref_batch_metadata: BatchMetadata, queue_type: u64, value_vecs: &mut Vec>, - bloom_filter_stores: &mut Vec>, - batches: &mut ZeroCopySliceMutU64<'_, Batch>, num_batches: usize, - num_iters: u64, - start_index: u64, ) { assert_eq!( batch_metadata, ref_batch_metadata, "batch_metadata mismatch" ); - assert_eq!(batches.len(), num_batches, "batches mismatch"); - for (i, batch) in batches.iter().enumerate() { - let ref_batch = Batch::new( - num_iters, - ref_batch_metadata.bloom_filter_capacity, - ref_batch_metadata.batch_size, - ref_batch_metadata.zkp_batch_size, - ref_batch_metadata.batch_size * i as u64 + start_index, - ); - - assert_eq!(batch, &ref_batch, "batch mismatch"); - } if queue_type == QueueType::BatchedOutput as u64 { assert_eq!(value_vecs.capacity(), num_batches, "value_vecs mismatch"); @@ -710,33 +695,6 @@ pub fn assert_queue_inited( assert_eq!(value_vecs.capacity(), 0, "value_vecs mismatch"); } - if queue_type == QueueType::BatchedOutput as u64 { - assert_eq!( - bloom_filter_stores.capacity(), - 0, - "bloom_filter_stores mismatch" - ); - } else { - assert_eq!( - bloom_filter_stores.capacity(), - num_batches, - "bloom_filter_stores mismatch" - ); - assert_eq!( - bloom_filter_stores.len(), - num_batches, - "bloom_filter_stores mismatch" - ); - } - - for vec in bloom_filter_stores { - assert_eq!( - vec.len() * 8, - batch_metadata.bloom_filter_capacity as usize, - "bloom_filter_capacity mismatch" - ); - } - for vec in value_vecs.iter() { assert_eq!( vec.capacity(), @@ -748,17 +706,12 @@ pub fn assert_queue_inited( } #[cfg(not(target_os = "solana"))] -pub fn assert_queue_zero_copy_inited( - account_data: &mut [u8], - ref_account: BatchedQueueMetadata, - num_iters: u64, -) { +pub fn assert_queue_zero_copy_inited(account_data: &mut [u8], ref_account: BatchedQueueMetadata) { let mut account = BatchedQueueAccount::output_from_bytes(account_data) .expect("from_bytes_unchecked_mut failed"); let num_batches = ref_account.batch_metadata.num_batches as usize; let batch_metadata = account.batch_metadata; let queue_type = account.metadata.metadata.queue_type; - let next_index = account.next_index; assert_eq!( account.metadata.metadata, ref_account.metadata, "metadata mismatch" @@ -768,10 +721,6 @@ pub fn assert_queue_zero_copy_inited( ref_account.batch_metadata, queue_type, &mut account.value_vecs, - &mut account.bloom_filter_stores, - &mut account.batches, num_batches, - num_iters, - next_index, ); } diff --git a/program-libs/batched-merkle-tree/src/rollover_address_tree.rs b/program-libs/batched-merkle-tree/src/rollover_address_tree.rs index 2ecf6e1e3c..62f268001a 100644 --- a/program-libs/batched-merkle-tree/src/rollover_address_tree.rs +++ b/program-libs/batched-merkle-tree/src/rollover_address_tree.rs @@ -65,7 +65,7 @@ fn create_batched_address_tree_init_params( input_queue_batch_size: old_merkle_tree.queue_metadata.batch_size, input_queue_zkp_batch_size: old_merkle_tree.queue_metadata.zkp_batch_size, bloom_filter_capacity: old_merkle_tree.queue_metadata.bloom_filter_capacity, - bloom_filter_num_iters: old_merkle_tree.batches[0].num_iters, + bloom_filter_num_iters: old_merkle_tree.queue_metadata.batches[0].num_iters, root_history_capacity: old_merkle_tree.root_history_capacity, network_fee, rollover_threshold: if_equals_none( @@ -91,7 +91,6 @@ pub fn assert_address_mt_roll_over( mut new_mt_account_data: Vec, new_ref_mt_account: crate::merkle_tree_metadata::BatchedMerkleTreeMetadata, new_mt_pubkey: Pubkey, - bloom_filter_num_iters: u64, ) { old_ref_mt_account .metadata @@ -104,6 +103,5 @@ pub fn assert_address_mt_roll_over( crate::initialize_state_tree::assert_address_mt_zero_copy_inited( &mut new_mt_account_data, new_ref_mt_account, - bloom_filter_num_iters, ); } diff --git a/program-libs/batched-merkle-tree/src/rollover_state_tree.rs b/program-libs/batched-merkle-tree/src/rollover_state_tree.rs index dafb08d62e..a56e65da58 100644 --- a/program-libs/batched-merkle-tree/src/rollover_state_tree.rs +++ b/program-libs/batched-merkle-tree/src/rollover_state_tree.rs @@ -105,7 +105,7 @@ impl From<&RolloverBatchStateTreeParams<'_>> for InitStateTreeAccountsInstructio input_queue_batch_size: params.old_merkle_tree.queue_metadata.batch_size, input_queue_zkp_batch_size: params.old_merkle_tree.queue_metadata.zkp_batch_size, bloom_filter_capacity: params.old_merkle_tree.queue_metadata.bloom_filter_capacity, - bloom_filter_num_iters: params.old_merkle_tree.batches[0].num_iters, + bloom_filter_num_iters: params.old_merkle_tree.queue_metadata.batches[0].num_iters, root_history_capacity: params.old_merkle_tree.root_history_capacity, network_fee: params.network_fee, rollover_threshold: if_equals_none( @@ -128,7 +128,7 @@ impl From<&RolloverBatchStateTreeParams<'_>> for InitStateTreeAccountsInstructio additional_bytes: params.additional_bytes, output_queue_batch_size: params.old_output_queue.batch_metadata.batch_size, output_queue_zkp_batch_size: params.old_output_queue.batch_metadata.zkp_batch_size, - output_queue_num_batches: params.old_output_queue.batches.len() as u64, + output_queue_num_batches: params.old_output_queue.batch_metadata.batches.len() as u64, } } } @@ -164,7 +164,6 @@ pub struct StateMtRollOverAssertParams { pub new_mt_account_data: Vec, pub old_mt_pubkey: Pubkey, pub new_mt_pubkey: Pubkey, - pub bloom_filter_num_iters: u64, pub ref_rolledover_mt: BatchedMerkleTreeMetadata, pub queue_account_data: Vec, pub ref_queue_account: BatchedQueueMetadata, @@ -183,7 +182,7 @@ pub fn assert_state_mt_roll_over(params: StateMtRollOverAssertParams) { new_mt_account_data, old_mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters, + ref_rolledover_mt, mut queue_account_data, ref_queue_account, @@ -203,7 +202,7 @@ pub fn assert_state_mt_roll_over(params: StateMtRollOverAssertParams) { .rollover_metadata .rolledover_slot = slot; - crate::queue::assert_queue_zero_copy_inited(&mut new_queue_account_data, ref_queue_account, 0); + crate::queue::assert_queue_zero_copy_inited(&mut new_queue_account_data, ref_queue_account); let zero_copy_queue = BatchedQueueAccount::output_from_bytes(&mut queue_account_data).unwrap(); assert_eq!(zero_copy_queue.metadata, ref_rolledover_queue.metadata); @@ -212,7 +211,6 @@ pub fn assert_state_mt_roll_over(params: StateMtRollOverAssertParams) { ref_mt_account, new_mt_account_data, new_mt_pubkey, - bloom_filter_num_iters, ref_rolledover_mt, old_queue_pubkey, slot, @@ -228,7 +226,6 @@ pub struct MtRollOverAssertParams { pub ref_mt_account: BatchedMerkleTreeMetadata, pub new_mt_account_data: Vec, pub new_mt_pubkey: Pubkey, - pub bloom_filter_num_iters: u64, pub ref_rolledover_mt: BatchedMerkleTreeMetadata, pub old_queue_pubkey: Pubkey, pub slot: u64, @@ -241,7 +238,6 @@ pub fn assert_mt_roll_over(params: MtRollOverAssertParams) { ref_mt_account, mut new_mt_account_data, new_mt_pubkey, - bloom_filter_num_iters, mut ref_rolledover_mt, old_queue_pubkey, slot, @@ -258,6 +254,5 @@ pub fn assert_mt_roll_over(params: MtRollOverAssertParams) { crate::initialize_state_tree::assert_state_mt_zero_copy_inited( &mut new_mt_account_data, ref_mt_account, - bloom_filter_num_iters, ); } diff --git a/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs b/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs index 02e7a52a39..e6b659478e 100644 --- a/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs +++ b/program-libs/batched-merkle-tree/tests/initialize_address_tree.rs @@ -27,11 +27,7 @@ fn test_account_init() { init_batched_address_merkle_tree_account(owner, params, &mut mt_account_data, merkle_tree_rent) .unwrap(); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } #[test] @@ -67,7 +63,7 @@ fn test_rnd_account_init() { rollover_threshold: Some(rng.gen_range(0..100)), close_threshold: None, root_history_capacity: rng.gen_range(1..1000), - input_queue_num_batches: rng.gen_range(1..4), + input_queue_num_batches: 2, height: rng.gen_range(1..32), }; @@ -82,11 +78,7 @@ fn test_rnd_account_init() { { let num_zkp_batches = params.input_queue_batch_size / params.input_queue_zkp_batch_size; let num_batches = params.input_queue_num_batches as usize; - let batch_size = - ZeroCopySliceMutU64::::required_size_for_capacity(num_batches as u64); - let bloom_filter_size = ZeroCopySliceMutU64::::required_size_for_capacity( - params.bloom_filter_capacity / 8, - ) * num_batches; + let bloom_filter_size = ((params.bloom_filter_capacity / 8) * 2u64) as usize; let hash_chain_store_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(num_zkp_batches) * num_batches; @@ -96,7 +88,6 @@ fn test_rnd_account_init() { // Output queue let ref_account_size = BatchedMerkleTreeMetadata::LEN + root_history_size - + batch_size + bloom_filter_size // 2 hash chain stores + hash_chain_store_size; @@ -116,10 +107,6 @@ fn test_rnd_account_init() { let mt_params = CreateTreeParams::from_address_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(mt_params, merkle_tree_rent); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); } } diff --git a/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs b/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs index b6ff2f86d3..ac4a59134e 100644 --- a/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/tests/initialize_state_tree.rs @@ -70,7 +70,6 @@ fn test_different_parameters() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let mt_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = @@ -78,7 +77,6 @@ fn test_different_parameters() { assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, - params.bloom_filter_num_iters, ); } } @@ -123,14 +121,12 @@ fn test_account_init() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let mt_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(mt_params, output_queue_pubkey); assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, - params.bloom_filter_num_iters, ); } @@ -171,8 +167,8 @@ fn test_rnd_account_init() { rollover_threshold: Some(rng.gen_range(0..100)), close_threshold: None, root_history_capacity: rng.gen_range(1..1000), - input_queue_num_batches: rng.gen_range(1..4), - output_queue_num_batches: rng.gen_range(1..4), + input_queue_num_batches: 2, + output_queue_num_batches: 2, height: rng.gen_range(1..32), }; let queue_account_size = get_output_queue_account_size( @@ -185,9 +181,6 @@ fn test_rnd_account_init() { let num_batches = params.output_queue_num_batches as usize; let num_zkp_batches = params.output_queue_batch_size / params.output_queue_zkp_batch_size; - let batch_size = ZeroCopySliceMutU64::::required_size_for_capacity( - params.output_queue_num_batches, - ); let value_vec_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity( params.output_queue_batch_size, ) * num_batches; @@ -198,7 +191,6 @@ fn test_rnd_account_init() { let ref_queue_account_size = // metadata BatchedQueueMetadata::LEN - + batch_size // 2 value vecs + value_vec_size // 2 hash chain stores @@ -221,10 +213,7 @@ fn test_rnd_account_init() { { let num_zkp_batches = params.input_queue_batch_size / params.input_queue_zkp_batch_size; let num_batches = params.input_queue_num_batches; - let batch_size = ZeroCopySliceMutU64::::required_size_for_capacity(num_batches); - let bloom_filter_size = ZeroCopySliceMutU64::::required_size_for_capacity( - params.bloom_filter_capacity / 8, - ) * num_batches as usize; + let bloom_filter_size = ((params.bloom_filter_capacity / 8) * num_batches) as usize; let hash_chain_store_size = ZeroCopyVecU64::<[u8; 32]>::required_size_for_capacity(num_zkp_batches) * num_batches as usize; @@ -236,7 +225,7 @@ fn test_rnd_account_init() { // metadata BatchedMerkleTreeMetadata::LEN + root_history_size - + batch_size + + bloom_filter_size // 2 hash chain stores + hash_chain_store_size; @@ -270,7 +259,6 @@ fn test_rnd_account_init() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let mt_params = CreateTreeParams::from_state_ix_params(params, owner); @@ -279,7 +267,6 @@ fn test_rnd_account_init() { assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, - params.bloom_filter_num_iters, ); } } diff --git a/program-libs/batched-merkle-tree/tests/merkle_tree.rs b/program-libs/batched-merkle-tree/tests/merkle_tree.rs index c983a03472..cae40b9ada 100644 --- a/program-libs/batched-merkle-tree/tests/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/tests/merkle_tree.rs @@ -1,4 +1,4 @@ -#![allow(unused_assignments)] +// #![allow(unused_assignments)] use std::cmp::min; use light_batched_merkle_tree::{ @@ -23,7 +23,7 @@ use light_batched_merkle_tree::{ }, merkle_tree_metadata::BatchedMerkleTreeMetadata, queue::{ - get_output_queue_account_size_default, get_output_queue_account_size_from_params, + self, get_output_queue_account_size_default, get_output_queue_account_size_from_params, BatchedQueueAccount, BatchedQueueMetadata, }, }; @@ -36,14 +36,13 @@ use light_prover_client::{ }; use light_utils::{hashchain::create_hash_chain_from_slice, pubkey::Pubkey}; use light_verifier::CompressedProof; -use light_zero_copy::{slice_mut::ZeroCopySliceMutU64, vec::ZeroCopyVecU64}; +use light_zero_copy::vec::ZeroCopyVecU64; use rand::{rngs::StdRng, Rng}; use serial_test::serial; #[allow(clippy::too_many_arguments)] pub fn assert_nullifier_queue_insert( pre_account: BatchedMerkleTreeMetadata, - pre_batches: ZeroCopySliceMutU64, pre_value_vecs: &mut Vec>, pre_roots: Vec<[u8; 32]>, pre_hashchains: Vec>, @@ -63,7 +62,6 @@ pub fn assert_nullifier_queue_insert( } assert_input_queue_insert( pre_account, - pre_batches, pre_value_vecs, pre_roots, pre_hashchains, @@ -80,7 +78,6 @@ pub fn assert_nullifier_queue_insert( #[allow(clippy::too_many_arguments)] pub fn assert_input_queue_insert( mut pre_account: BatchedMerkleTreeMetadata, - mut pre_batches: ZeroCopySliceMutU64, pre_value_vecs: &mut Vec>, pre_roots: Vec<[u8; 32]>, mut pre_hashchains: Vec>, @@ -129,7 +126,11 @@ pub fn assert_input_queue_insert( let inserted_batch_index = pre_account.queue_metadata.currently_processing_batch_index as usize; - let expected_batch = pre_batches.get_mut(inserted_batch_index).unwrap(); + let expected_batch = pre_account + .queue_metadata + .batches + .get_mut(inserted_batch_index) + .unwrap(); println!( "assert input queue batch update: expected_batch: {:?}", expected_batch @@ -164,9 +165,9 @@ pub fn assert_input_queue_insert( ); // New value exists in the current batch bloom filter let mut bloom_filter = light_bloom_filter::BloomFilter::new( - merkle_tree_account.batches[inserted_batch_index].num_iters as usize, - merkle_tree_account.batches[inserted_batch_index].bloom_filter_capacity, - merkle_tree_account.bloom_filter_stores[inserted_batch_index].as_mut_slice(), + merkle_tree_account.queue_metadata.batches[inserted_batch_index].num_iters as usize, + merkle_tree_account.queue_metadata.batches[inserted_batch_index].bloom_filter_capacity, + merkle_tree_account.bloom_filter_stores[inserted_batch_index], ) .unwrap(); println!( @@ -177,16 +178,21 @@ pub fn assert_input_queue_insert( let pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap(); expected_batch.add_to_hash_chain(&leaf_hashchain_insert_values[i], pre_hashchain)?; + let num_iters = + merkle_tree_account.queue_metadata.batches[inserted_batch_index].num_iters as usize; + let bloom_filter_capacity = + merkle_tree_account.queue_metadata.batches[inserted_batch_index].bloom_filter_capacity; // New value does not exist in the other batch bloom_filters - for (i, batch) in merkle_tree_account.batches.iter_mut().enumerate() { + for (i, store) in merkle_tree_account + .bloom_filter_stores + .iter_mut() + .enumerate() + { // Skip current batch it is already checked above if i != inserted_batch_index { - let mut bloom_filter = light_bloom_filter::BloomFilter::new( - batch.num_iters as usize, - batch.bloom_filter_capacity, - merkle_tree_account.bloom_filter_stores[i].as_mut_slice(), - ) - .unwrap(); + let mut bloom_filter = + light_bloom_filter::BloomFilter::new(num_iters, bloom_filter_capacity, store) + .unwrap(); assert!(!bloom_filter.contains(insert_value)); } } @@ -195,7 +201,7 @@ pub fn assert_input_queue_insert( // update if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches() { assert_eq!( - merkle_tree_account.batches + merkle_tree_account.queue_metadata.batches [pre_account.queue_metadata.currently_processing_batch_index as usize] .get_state(), BatchState::Full @@ -204,7 +210,7 @@ pub fn assert_input_queue_insert( pre_account.queue_metadata.currently_processing_batch_index %= pre_account.queue_metadata.num_batches; assert_eq!( - merkle_tree_account.batches[inserted_batch_index], + merkle_tree_account.queue_metadata.batches[inserted_batch_index], *expected_batch ); assert_eq!( @@ -223,18 +229,18 @@ pub fn assert_input_queue_insert( "BatchedMerkleTreeMetadata changed." ); let inserted_batch_index = pre_account.queue_metadata.currently_processing_batch_index as usize; - let mut expected_batch = pre_batches[inserted_batch_index]; + let mut expected_batch = pre_account.queue_metadata.batches[inserted_batch_index]; if should_be_zeroed { expected_batch.set_bloom_filter_to_zeroed(); } assert_eq!( - merkle_tree_account.batches[inserted_batch_index], + merkle_tree_account.queue_metadata.batches[inserted_batch_index], expected_batch ); let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; assert_eq!( - merkle_tree_account.batches[other_batch], - pre_batches[other_batch] + merkle_tree_account.queue_metadata.batches[other_batch], + pre_account.queue_metadata.batches[other_batch] ); assert_eq!( merkle_tree_account.hashchain_store, *pre_hashchains, @@ -249,30 +255,30 @@ pub fn assert_input_queue_insert( /// - if batch is full after insertion advance state to ReadyToUpdateTree pub fn assert_output_queue_insert( mut pre_account: BatchedQueueMetadata, - mut pre_batches: Vec, + // mut pre_batches: Vec, mut pre_value_store: Vec>, mut pre_hashchains: Vec>, mut output_account: BatchedQueueAccount, insert_values: Vec<[u8; 32]>, ) -> Result<(), BatchedMerkleTreeError> { - for batch in output_account.batches.iter_mut() { + for batch in output_account.batch_metadata.batches.iter_mut() { println!("output_account.batch: {:?}", batch); } - for batch in pre_batches.iter() { + for batch in pre_account.batch_metadata.batches.iter() { println!("pre_batch: {:?}", batch); } for insert_value in insert_values.iter() { // There are no bloom_filters - for store in output_account.bloom_filter_stores.iter() { - assert_eq!(store.len(), 0); - } + // for store in output_account.bloom_filter_stores.iter() { + // assert_eq!(store.len(), 0); + // } // if the currently processing batch changed it should // increment by one and the old batch should be ready to // update let inserted_batch_index = pre_account.batch_metadata.currently_processing_batch_index as usize; - let expected_batch = &mut pre_batches[inserted_batch_index]; + let expected_batch = &mut pre_account.batch_metadata.batches[inserted_batch_index]; let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap(); let pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap(); if expected_batch.get_state() == BatchState::Inserted { @@ -295,7 +301,7 @@ pub fn assert_output_queue_insert( .contains(insert_value)); if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index() { assert!( - output_account.batches + output_account.batch_metadata.batches [pre_account.batch_metadata.currently_processing_batch_index as usize] .get_state() == BatchState::Full @@ -304,17 +310,17 @@ pub fn assert_output_queue_insert( pre_account.batch_metadata.currently_processing_batch_index %= pre_account.batch_metadata.num_batches; assert_eq!( - output_account.batches[inserted_batch_index], + output_account.batch_metadata.batches[inserted_batch_index], *expected_batch ); } } let inserted_batch_index = pre_account.batch_metadata.currently_processing_batch_index as usize; - let expected_batch = &pre_batches[inserted_batch_index]; - assert_eq!( - output_account.batches[inserted_batch_index], - *expected_batch - ); + // let expected_batch = &pre_batches[inserted_batch_index]; + // assert_eq!( + // output_account.batch_metadata.batches[inserted_batch_index], + // *expected_batch + // ); assert_eq!( *output_account.get_metadata(), pre_account, @@ -335,7 +341,6 @@ pub fn assert_output_queue_insert( ); } } - assert_eq!(pre_batches, output_account.batches.to_vec()); assert_eq!(pre_value_store, output_account.value_vecs); Ok(()) } @@ -373,12 +378,17 @@ pub fn simulate_transaction( println!("simulate_transaction: inclusion is none"); let mut included = false; let mut leaf_index = 0; + let start_indices = merkle_tree_account + .queue_metadata + .batches + .iter() + .map(|batch| batch.start_index) + .collect::>(); for (batch_index, value_vec) in output_account.value_vecs.iter_mut().enumerate() { for (value_index, value) in value_vec.iter_mut().enumerate() { if *value == *input { - let batch_start_index = - output_account.batches.get(batch_index).unwrap().start_index; + let batch_start_index = start_indices[batch_index]; included = true; println!("overwriting value: {:?}", value); *value = [0u8; 32]; @@ -528,7 +538,11 @@ async fn test_simulate_transactions() { panic!("Leaf not found in output queue."); } } - let batch = output_queue.batches.get(batch_index).unwrap(); + let batch = output_queue + .batch_metadata + .batches + .get(batch_index) + .unwrap(); array_indices.push(leaf_array_index); let leaf_index: u64 = batch.start_index + leaf_array_index as u64; leaf_indices.push(leaf_index); @@ -548,7 +562,7 @@ async fn test_simulate_transactions() { BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); println!( "input queue: {:?}", - merkle_tree_account.batches[0].get_num_inserted() + merkle_tree_account.queue_metadata.batches[0].get_num_inserted() ); let mut pre_mt_data = mt_account_data.clone(); @@ -557,7 +571,7 @@ async fn test_simulate_transactions() { let pre_output_account = BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); let pre_output_metadata = *pre_output_account.get_metadata(); - let pre_output_batches = pre_output_account.batches; + let pre_output_batches = pre_output_account.batch_metadata.batches; let mut pre_output_value_stores = pre_output_account.value_vecs; let pre_output_hashchains = pre_output_account.hashchain_store; @@ -565,7 +579,7 @@ async fn test_simulate_transactions() { let pre_merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_bytes).unwrap(); let pre_mt_account = *pre_merkle_tree_account.get_metadata(); - let pre_batches = pre_merkle_tree_account.batches; + let pre_batches = pre_merkle_tree_account.queue_metadata.batches; // let pre_value_store = pre_merkle_tree_account.value_vecs; let pre_roots = pre_merkle_tree_account .root_history @@ -591,7 +605,6 @@ async fn test_simulate_transactions() { println!("inputs: {:?}", inputs); assert_nullifier_queue_insert( pre_mt_account, - pre_batches, &mut pre_output_value_stores, // mut to remove values proven by index pre_roots, pre_mt_hashchains, @@ -608,7 +621,6 @@ async fn test_simulate_transactions() { if !outputs.is_empty() { assert_output_queue_insert( pre_output_metadata, - pre_output_batches.to_vec(), pre_output_value_stores, pre_output_hashchains, BatchedQueueAccount::output_from_bytes( @@ -635,12 +647,14 @@ async fn test_simulate_transactions() { let merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data).unwrap(); in_ready_for_update = merkle_tree_account + .queue_metadata .batches .iter() .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); out_ready_for_update = output_account + .batch_metadata .batches .iter() .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); @@ -663,10 +677,14 @@ async fn test_simulate_transactions() { let (input_res, new_root) = { let mut account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data).unwrap(); - println!("batches {:?}", account.batches); + println!("batches {:?}", account.queue_metadata.batches); let next_full_batch = account.get_metadata().queue_metadata.next_full_batch_index; - let batch = account.batches.get(next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); println!( "account .hashchain_store {:?}", @@ -744,6 +762,7 @@ async fn test_simulate_transactions() { .batch_metadata .next_full_batch_index; let batch = output_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -799,8 +818,8 @@ async fn test_simulate_transactions() { let old_account = BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); assert_batch_append_event_event( batch_append_event, new_root, @@ -824,8 +843,8 @@ async fn test_simulate_transactions() { } let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); println!("num_output_updates: {}", num_output_updates); println!("num_input_updates: {}", num_input_updates); println!("num_output_values: {}", num_output_values); @@ -915,7 +934,7 @@ async fn test_e2e() { let pre_output_account = BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); let pre_account = *pre_output_account.get_metadata(); - let pre_batches = pre_output_account.batches; + let pre_batches = pre_output_account.batch_metadata.batches; let pre_value_store = pre_output_account.value_vecs; let pre_hashchains = pre_output_account.hashchain_store; let mut output_account = @@ -925,7 +944,6 @@ async fn test_e2e() { .unwrap(); assert_output_queue_insert( pre_account, - pre_batches.to_vec(), pre_value_store, pre_hashchains, BatchedQueueAccount::output_from_bytes( @@ -941,6 +959,7 @@ async fn test_e2e() { let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); out_ready_for_update = output_account + .batch_metadata .batches .iter() .any(|batch| batch.get_state() == BatchState::Full); @@ -957,7 +976,7 @@ async fn test_e2e() { let pre_mt_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_account_bytes).unwrap(); let pre_account = *pre_mt_account.get_metadata(); - let pre_batches = pre_mt_account.batches; + let pre_batches = pre_mt_account.queue_metadata.batches; let pre_hashchains = pre_mt_account.hashchain_store; let pre_roots = pre_mt_account.root_history.iter().cloned().collect(); let tx_hash = create_hash_chain_from_slice(vec![leaf].as_slice()).unwrap(); @@ -985,7 +1004,6 @@ async fn test_e2e() { BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); assert_nullifier_queue_insert( pre_account, - pre_batches, &mut vec![], pre_roots, pre_hashchains, @@ -1004,6 +1022,7 @@ async fn test_e2e() { BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); in_ready_for_update = merkle_tree_account + .queue_metadata .batches .iter() .any(|batch| batch.get_state() == BatchState::Full); @@ -1042,6 +1061,7 @@ async fn test_e2e() { .batch_metadata .next_full_batch_index; let batch = output_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -1117,8 +1137,8 @@ async fn test_e2e() { let old_account = BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); assert_merkle_tree_update( old_account, account, @@ -1135,8 +1155,8 @@ async fn test_e2e() { } let output_account = BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batches[0]); - println!("batch 1: {:?}", output_account.batches[1]); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); println!("num_output_updates: {}", num_output_updates); println!("num_input_updates: {}", num_input_updates); println!("num_output_values: {}", num_output_values); @@ -1155,7 +1175,11 @@ pub async fn perform_input_update( let mut account = BatchedMerkleTreeAccount::state_from_bytes(mt_account_data).unwrap(); let next_full_batch = account.get_metadata().queue_metadata.next_full_batch_index; - let batch = account.batches.get(next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); let leaves_hashchain = account .hashchain_store .get(next_full_batch as usize) @@ -1216,7 +1240,11 @@ pub async fn perform_address_update( let next_full_batch = account.get_metadata().queue_metadata.next_full_batch_index; let next_index = account.get_metadata().next_index; println!("next index {:?}", next_index); - let batch = account.batches.get(next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); let batch_start_index = batch.start_index; let leaves_hashchain = account .hashchain_store @@ -1264,7 +1292,11 @@ pub async fn perform_address_update( let account = BatchedMerkleTreeAccount::address_from_bytes(mt_account_data).unwrap(); { - let batch = account.batches.get(pre_next_full_batch as usize).unwrap(); + let batch = account + .queue_metadata + .batches + .get(pre_next_full_batch as usize) + .unwrap(); if batch.get_state() == BatchState::Inserted { mock_indexer.finalize_batch_address_update(batch.batch_size as usize); } @@ -1275,37 +1307,152 @@ pub async fn perform_address_update( } fn assert_merkle_tree_update( - old_account: BatchedMerkleTreeAccount, + mut old_account: BatchedMerkleTreeAccount, account: BatchedMerkleTreeAccount, old_queue_account: Option, queue_account: Option, root: [u8; 32], ) { + // Output queue update + if let Some(mut old_queue_account) = old_queue_account { + let queue_account = queue_account.unwrap(); + let old_full_batch_index = old_queue_account.batch_metadata.next_full_batch_index; + let old_full_batch = old_queue_account + .batch_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + println!("old full batch {:?}", old_full_batch); + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + old_account.root_history.capacity() as u32, + ) + .unwrap(); + + if old_full_batch.get_state() == BatchState::Inserted { + old_queue_account.batch_metadata.next_full_batch_index += 1; + old_queue_account.batch_metadata.next_full_batch_index %= 2; + } + assert_eq!( + queue_account.get_metadata(), + old_queue_account.get_metadata() + ); + assert_eq!(queue_account, old_queue_account); + // Only the output queue appends state + let zkp_batch_size = old_account.queue_metadata.zkp_batch_size; + old_account.next_index += zkp_batch_size; + } else { + // Input queue update + let old_full_batch_index = old_account.queue_metadata.next_full_batch_index; + let history_capacity = old_account.root_history.capacity(); + let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; + + let old_full_batch = old_account + .queue_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + history_capacity as u32, + ) + .unwrap(); + println!( + "current batch {:?}", + old_full_batch.get_num_inserted_elements() + ); + + if old_full_batch.get_state() == BatchState::Inserted { + old_account.queue_metadata.next_full_batch_index += 1; + old_account.queue_metadata.next_full_batch_index %= 2; + } + let old_full_batch_index = old_account.queue_metadata.next_full_batch_index; + + let old_full_batch = old_account + .queue_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + let zeroed_batch = + old_full_batch.get_num_inserted_elements() >= old_full_batch.batch_size / 2; + println!("zeroed_batch: {:?}", zeroed_batch); + + // let current_batch = old_account.queue_metadata.get_current_batch(); + + let state = old_account.queue_metadata.batches[previous_full_batch_index].get_state(); + let previous_batch = old_account + .queue_metadata + .batches + .get_mut(previous_full_batch_index as usize) + .unwrap(); + println!("state: {:?}", state); + if zeroed_batch && state == BatchState::Inserted { + previous_batch.set_bloom_filter_to_zeroed(); + let sequence_number = previous_batch.sequence_number; + let root_index = previous_batch.root_index; + old_account.bloom_filter_stores[previous_full_batch_index as usize] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + + let mut oldest_root_index = old_account.root_history.first_index(); + + let num_remaining_roots = sequence_number - old_account.sequence_number; + for _ in 1..num_remaining_roots { + println!("zeroing out root index: {}", oldest_root_index); + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + } + } + + old_account.sequence_number += 1; + old_account.root_history.push(root); + assert_eq!(account.get_metadata(), old_account.get_metadata()); + assert_eq!(account, old_account); + assert_eq!(*account.root_history.last().unwrap(), root); + + /* let mut expected_account = *old_account.get_metadata(); expected_account.sequence_number += 1; let actual_account = *account.get_metadata(); - // We only have two batches. - let previous_full_batch_index = if expected_account.queue_metadata.next_full_batch_index == 0 { - 1 - } else { - 0 - }; - let (batches, mut previous_batches, expected_queue_account, mut next_full_batch_index) = + let (batches, mut previous_batches, mut expected_queue_account, mut next_full_batch_index) = if let Some(queue_account) = queue_account.as_ref() { let expected_queue_account = *old_queue_account.as_ref().unwrap().get_metadata(); - expected_account.next_index += queue_account.batches.get(0).unwrap().zkp_batch_size; + expected_account.next_index += queue_account + .batch_metadata + .batches + .get(0) + .unwrap() + .zkp_batch_size; let next_full_batch_index = expected_queue_account.batch_metadata.next_full_batch_index; ( - queue_account.batches.to_vec(), - old_queue_account.as_ref().unwrap().batches.to_vec(), + queue_account.batch_metadata.batches.to_vec(), + old_queue_account + .as_ref() + .unwrap() + .batch_metadata + .batches + .to_vec(), Some(expected_queue_account), next_full_batch_index, ) } else { - let mut batches = old_account.batches.to_vec(); + // let mut batches = old_account.queue_metadata.batches.to_vec(); println!("previous_full_batch_index: {:?}", previous_full_batch_index); - let previous_batch = batches.get_mut(previous_full_batch_index as usize).unwrap(); + let previous_batch = old_account + .queue_metadata + .batches + .get_mut(previous_full_batch_index as usize) + .unwrap(); println!("previous_batch state: {:?}", previous_batch.get_state()); println!( "previous_batch zeroed?: {:?}", @@ -1313,8 +1460,11 @@ fn assert_merkle_tree_update( ); let previous_batch_is_ready = previous_batch.get_state() == BatchState::Inserted && !previous_batch.bloom_filter_is_zeroed(); - let batch = batches - .get_mut(old_account.queue_metadata.next_full_batch_index as usize) + let next_full_batch_index = old_account.queue_metadata.next_full_batch_index as usize; + let batch = old_account + .queue_metadata + .batches + .get_mut(next_full_batch_index) .unwrap(); println!("previous_batch_is_ready: {:?}", previous_batch_is_ready); @@ -1331,19 +1481,36 @@ fn assert_merkle_tree_update( let zeroed_batch = batch.get_num_inserted_elements() + batch.zkp_batch_size >= batch.batch_size / 2 && previous_batch_is_ready; - let previous_batch = batches.get_mut(previous_full_batch_index as usize).unwrap(); + let previous_batch = old_account + .queue_metadata + .batches + .get_mut(previous_full_batch_index as usize) + .unwrap(); if zeroed_batch { previous_batch.set_bloom_filter_to_zeroed(); println!("set bloom filter is zeroed"); } - (account.batches.to_vec(), batches, None, 0) + ( + account.queue_metadata.batches.to_vec(), + old_account.queue_metadata.batches.to_vec(), + None, + 0, + ) }; let mut checked_one = false; - + let mut num_inserted_zkp = [0, 0]; for (i, batch) in batches.iter().enumerate() { - let previous_batch = previous_batches.get_mut(i).unwrap(); + let previous_batch = &mut if let Some(queue_account) = queue_account.as_ref() { + expected_queue_account + .as_mut() + .unwrap() + .batch_metadata + .batches[i] + } else { + expected_account.queue_metadata.batches[i] + }; let expected_sequence_number = account.root_history.capacity() as u64 + account.get_metadata().sequence_number; @@ -1351,7 +1518,8 @@ fn assert_merkle_tree_update( && batch.get_state() == BatchState::Inserted; let updated_batch = previous_batch.get_first_ready_zkp_batch().is_ok() && !checked_one; - + println!("updated_batch: {:?}", updated_batch); + println!("batch_fully_inserted: {:?}", batch_fully_inserted); // Assert fully inserted batch if batch_fully_inserted { if queue_account.is_some() { @@ -1362,39 +1530,79 @@ fn assert_merkle_tree_update( expected_account.queue_metadata.next_full_batch_index %= expected_account.queue_metadata.num_batches; } - assert_eq!(batch.root_index as usize, account.root_history.last_index()); - assert_eq!(batch.get_num_inserted_zkps(), 0); - assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); - assert_eq!(batch.get_num_inserted(), 0); - assert_ne!(batch.sequence_number, previous_batch.sequence_number); - assert_eq!(batch.get_current_zkp_batch_index(), 0); - assert_ne!(batch.get_state(), previous_batch.get_state()); - } - // assert updated batch - else if updated_batch { + num_inserted_zkp[i] += 1; + // assert_eq!(batch.root_index as usize, account.root_history.last_index()); + // assert_eq!(batch.get_num_inserted_zkps(), 0); + // assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); + // assert_eq!(batch.get_num_inserted(), 0); + // assert_ne!(batch.sequence_number, previous_batch.sequence_number); + // assert_eq!(batch.get_current_zkp_batch_index(), 0); + // assert_ne!(batch.get_state(), previous_batch.get_state()); + } else if updated_batch { checked_one = true; - assert_eq!( - batch.get_num_inserted_zkps(), - previous_batch.get_num_inserted_zkps() + 1 - ); - assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); - assert_eq!(batch.sequence_number, previous_batch.sequence_number); - assert_eq!(batch.root_index, previous_batch.root_index); - assert_eq!( - batch.get_current_zkp_batch_index(), - previous_batch.get_current_zkp_batch_index() + num_inserted_zkp[i] += 1; + println!( + "previous_batch.num_inserted_zkps: {:?}", + previous_batch.num_inserted_zkps ); - assert_eq!(batch.get_state(), previous_batch.get_state()); - assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); - } else { - assert_eq!(*batch, *previous_batch); } + // // assert updated batch + // else if updated_batch { + // checked_one = true; + // assert_eq!( + // batch.get_num_inserted_zkps(), + // previous_batch.get_num_inserted_zkps() + 1 + // ); + // assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); + + // assert_eq!(batch.sequence_number, previous_batch.sequence_number); + // assert_eq!(batch.root_index, previous_batch.root_index); + // assert_eq!( + // batch.get_current_zkp_batch_index(), + // previous_batch.get_current_zkp_batch_index() + // ); + // assert_eq!(batch.get_state(), previous_batch.get_state()); + // assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); + // } else { + // assert_eq!(*batch, *previous_batch); + // } + // if let Some(queue_account) = queue_account.as_ref() { + // expected_queue_account.unwrap().batch_metadata.batches[i] = *batch; + // } else { + // expected_account.queue_metadata.batches[i] = *batch; + // } } if let Some(queue_account) = queue_account.as_ref() { let mut expected_queue_account = expected_queue_account.unwrap(); expected_queue_account.batch_metadata.next_full_batch_index = next_full_batch_index; + println!("num_inserted_zkp: {:?}", num_inserted_zkp); + expected_queue_account + .batch_metadata + .batches + .iter_mut() + .enumerate() + .for_each(|(i, batch)| { + batch.num_inserted_zkps += num_inserted_zkp[i]; + if batch.num_inserted_zkps == 5 { + batch.advance_state_to_inserted().unwrap(); + batch.num_inserted_zkps = 0; + } + }); assert_eq!(*queue_account.get_metadata(), expected_queue_account); + } else { + expected_account + .queue_metadata + .batches + .iter_mut() + .enumerate() + .for_each(|(i, batch)| { + batch.num_inserted_zkps += num_inserted_zkp[i]; + if batch.num_inserted_zkps == 5 { + batch.advance_state_to_inserted().unwrap(); + batch.num_inserted_zkps = 0; + } + }); } assert_eq!(actual_account, expected_account); @@ -1407,6 +1615,7 @@ fn assert_merkle_tree_update( println!("old_account: root {:?}", root); } assert_eq!(*account.root_history.last().unwrap(), root); + */ } pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] { @@ -1419,7 +1628,7 @@ pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] { #[tokio::test] async fn test_fill_queues_completely() { spawn_prover( - true, + false, ProverConfig { run_mode: None, circuits: vec![ @@ -1480,7 +1689,7 @@ async fn test_fill_queues_completely() { let pre_output_account = BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_account_data).unwrap(); let pre_account = *pre_output_account.get_metadata(); - let pre_batches = pre_output_account.batches.to_vec(); + let pre_batches = pre_output_account.batch_metadata.batches.to_vec(); let pre_value_store = pre_output_account.value_vecs; let pre_hashchains = pre_output_account.hashchain_store; @@ -1492,7 +1701,6 @@ async fn test_fill_queues_completely() { .unwrap(); assert_output_queue_insert( pre_account, - pre_batches, pre_value_store, pre_hashchains, BatchedQueueAccount::output_from_bytes( @@ -1512,6 +1720,7 @@ async fn test_fill_queues_completely() { assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); output_account + .batch_metadata .batches .iter() .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); @@ -1534,6 +1743,7 @@ async fn test_fill_queues_completely() { .batch_metadata .next_full_batch_index; let batch = output_account + .batch_metadata .batches .get(next_full_batch as usize) .unwrap(); @@ -1601,7 +1811,7 @@ async fn test_fill_queues_completely() { let pre_merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data).unwrap(); let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_batches = pre_merkle_tree_account.batches; + let pre_batches = pre_merkle_tree_account.queue_metadata.batches; let pre_roots = pre_merkle_tree_account .root_history .iter() @@ -1630,7 +1840,6 @@ async fn test_fill_queues_completely() { .unwrap(); assert_nullifier_queue_insert( pre_account, - pre_batches, &mut vec![], pre_roots, pre_hashchains, @@ -1701,12 +1910,12 @@ async fn test_fill_queues_completely() { if i >= 7 { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - let batch = merkle_tree_account.batches.get(0).unwrap(); + let batch = merkle_tree_account.queue_metadata.batches.get(0).unwrap(); assert!(batch.bloom_filter_is_zeroed()); } else { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - let batch = merkle_tree_account.batches.get(0).unwrap(); + let batch = merkle_tree_account.queue_metadata.batches.get(0).unwrap(); assert!(!batch.bloom_filter_is_zeroed()); } println!( @@ -1732,7 +1941,12 @@ async fn test_fill_queues_completely() { { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - for (i, batch) in merkle_tree_account.batches.iter().enumerate() { + for (i, batch) in merkle_tree_account + .queue_metadata + .batches + .iter() + .enumerate() + { assert_eq!(batch.get_state(), BatchState::Inserted); if i == 0 { assert!(batch.bloom_filter_is_zeroed()); @@ -1745,7 +1959,7 @@ async fn test_fill_queues_completely() { { let merkle_tree_account = &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data).unwrap(); - let pre_batch_zero = *merkle_tree_account.batches.get(0).unwrap(); + let pre_batch_zero = *merkle_tree_account.queue_metadata.batches.get(0).unwrap(); let value = &get_rnd_bytes(&mut rng); let tx_hash = &get_rnd_bytes(&mut rng); @@ -1753,7 +1967,7 @@ async fn test_fill_queues_completely() { .insert_nullifier_into_current_batch(value, 0, tx_hash) .unwrap(); { - let post_batch = *merkle_tree_account.batches.get(0).unwrap(); + let post_batch = *merkle_tree_account.queue_metadata.batches.get(0).unwrap(); assert_eq!(post_batch.get_state(), BatchState::Fill); assert_eq!(post_batch.get_num_inserted(), 1); let bloom_filter_store = @@ -1761,7 +1975,7 @@ async fn test_fill_queues_completely() { let mut bloom_filter = BloomFilter::new( params.bloom_filter_num_iters as usize, params.bloom_filter_capacity, - bloom_filter_store.as_mut_slice(), + bloom_filter_store, ) .unwrap(); assert!(bloom_filter.contains(value)); @@ -1851,7 +2065,7 @@ async fn test_fill_address_tree_completely() { let pre_merkle_tree_account = BatchedMerkleTreeAccount::address_from_bytes(&mut pre_account_data).unwrap(); let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_batches = pre_merkle_tree_account.batches; + let pre_batches = pre_merkle_tree_account.queue_metadata.batches; let pre_roots = pre_merkle_tree_account .root_history .iter() @@ -1865,7 +2079,6 @@ async fn test_fill_address_tree_completely() { .unwrap(); assert_input_queue_insert( pre_account, - pre_batches, &mut vec![], pre_roots, pre_hashchains, @@ -1929,8 +2142,8 @@ async fn test_fill_address_tree_completely() { } let merkle_tree_account = BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data).unwrap(); - let batch = merkle_tree_account.batches.get(0).unwrap(); - let batch_one = merkle_tree_account.batches.get(1).unwrap(); + let batch = merkle_tree_account.queue_metadata.batches.get(0).unwrap(); + let batch_one = merkle_tree_account.queue_metadata.batches.get(1).unwrap(); assert!(!batch_one.bloom_filter_is_zeroed()); if i >= 7 { @@ -1943,7 +2156,12 @@ async fn test_fill_address_tree_completely() { { let merkle_tree_account = &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data).unwrap(); - for (i, batch) in merkle_tree_account.batches.iter().enumerate() { + for (i, batch) in merkle_tree_account + .queue_metadata + .batches + .iter() + .enumerate() + { assert_eq!(batch.get_state(), BatchState::Inserted); if i == 0 { assert!(batch.bloom_filter_is_zeroed()); @@ -1956,7 +2174,7 @@ async fn test_fill_address_tree_completely() { let merkle_tree_account = &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data).unwrap(); println!("root history {:?}", merkle_tree_account.root_history); - let pre_batch_zero = *merkle_tree_account.batches.get(0).unwrap(); + let pre_batch_zero = *merkle_tree_account.queue_metadata.batches.get(0).unwrap(); for root in merkle_tree_account.root_history.iter() { println!("root {:?}", root); diff --git a/program-libs/batched-merkle-tree/tests/queue.rs b/program-libs/batched-merkle-tree/tests/queue.rs index f78e97dd28..caf905d86c 100644 --- a/program-libs/batched-merkle-tree/tests/queue.rs +++ b/program-libs/batched-merkle-tree/tests/queue.rs @@ -1,4 +1,5 @@ use light_batched_merkle_tree::{ + batch::Batch, batch_metadata::BatchMetadata, errors::BatchedMerkleTreeError, queue::{assert_queue_zero_copy_inited, BatchedQueueAccount, BatchedQueueMetadata}, @@ -34,6 +35,7 @@ pub fn get_test_account_and_account_data( next_full_batch_index: 0, bloom_filter_capacity, zkp_batch_size: 10, + batches: [Batch::default(); 2], }, ..Default::default() }; @@ -73,7 +75,7 @@ fn test_output_queue_account() { ) .unwrap(); - assert_queue_zero_copy_inited(&mut account_data, ref_account, bloom_filter_num_iters); + assert_queue_zero_copy_inited(&mut account_data, ref_account); let mut account = BatchedQueueAccount::output_from_bytes(&mut account_data).unwrap(); let value = [1u8; 32]; account.insert_into_current_batch(&value).unwrap(); diff --git a/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs b/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs index 3770572304..94e25a8d57 100644 --- a/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs +++ b/program-libs/batched-merkle-tree/tests/rollover_address_tree.rs @@ -41,11 +41,7 @@ fn test_rollover() { let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(create_tree_params, merkle_tree_rent); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let mut new_mt_account_data = vec![0; mt_account_size]; let new_mt_pubkey = Pubkey::new_unique(); @@ -111,7 +107,6 @@ fn test_rollover() { new_mt_account_data.to_vec(), new_ref_mt_account, new_mt_pubkey, - params.bloom_filter_num_iters, ); } // 4. Failing: already rolled over @@ -220,11 +215,7 @@ fn test_rnd_rollover() { let ref_mt_account = BatchedMerkleTreeMetadata::new_address_tree(create_tree_params, merkle_tree_rent); - assert_address_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_address_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let mut new_mt_data = vec![0; mt_account_size]; let new_mt_rent = merkle_tree_rent; let network_fee = params.network_fee; @@ -250,7 +241,6 @@ fn test_rnd_rollover() { new_mt_data, new_ref_mt_account, new_mt_pubkey, - params.bloom_filter_num_iters, ); } } diff --git a/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs b/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs index e9143512bc..6acefef930 100644 --- a/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs +++ b/program-libs/batched-merkle-tree/tests/rollover_state_tree.rs @@ -69,20 +69,12 @@ fn test_rollover() { let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let total_rent = merkle_tree_rent + additional_bytes_rent + queue_rent; let output_queue_params = CreateOutputQueueParams::from(params, owner, total_rent, mt_pubkey); let ref_output_queue_account = create_output_queue_account(output_queue_params); - assert_queue_zero_copy_inited( - queue_account_data.as_mut_slice(), - ref_output_queue_account, - 0, - ); + assert_queue_zero_copy_inited(queue_account_data.as_mut_slice(), ref_output_queue_account); let mut new_mt_account_data = vec![0; mt_account_size]; let new_mt_pubkey = Pubkey::new_unique(); @@ -265,7 +257,6 @@ fn test_rollover() { new_mt_account_data: new_mt_account_data.to_vec(), old_mt_pubkey: mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_rolledover_mt, queue_account_data: queue_account_data.to_vec(), ref_queue_account: new_ref_output_queue_account, @@ -393,11 +384,7 @@ fn test_rollover() { .rollover_metadata .network_fee = 0; ref_output_queue_account.metadata.access_metadata.forester = forester; - assert_queue_zero_copy_inited( - queue_account_data.as_mut_slice(), - ref_output_queue_account, - 0, - ); + assert_queue_zero_copy_inited(queue_account_data.as_mut_slice(), ref_output_queue_account); // 8. Functional: rollover address tree with network fee 0 additional bytes 0 { let merkle_tree = @@ -443,7 +430,6 @@ fn test_rollover() { new_mt_account_data: new_mt_account_data.to_vec(), old_mt_pubkey: mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_rolledover_mt, queue_account_data: queue_account_data.to_vec(), ref_queue_account: new_ref_output_queue_account, @@ -553,17 +539,12 @@ fn test_rnd_rollover() { assert_queue_zero_copy_inited( output_queue_account_data.as_mut_slice(), ref_output_queue_account, - 0, ); let create_tree_params = CreateTreeParams::from_state_ix_params(params, owner); let ref_mt_account = BatchedMerkleTreeMetadata::new_state_tree(create_tree_params, output_queue_pubkey); - assert_state_mt_zero_copy_inited( - &mut mt_account_data, - ref_mt_account, - params.bloom_filter_num_iters, - ); + assert_state_mt_zero_copy_inited(&mut mt_account_data, ref_mt_account); let mut new_mt_account_data = vec![0; mt_account_size]; let new_mt_pubkey = Pubkey::new_unique(); @@ -612,7 +593,6 @@ fn test_rnd_rollover() { new_mt_account_data: new_mt_account_data.to_vec(), old_mt_pubkey: mt_pubkey, new_mt_pubkey, - bloom_filter_num_iters: params.bloom_filter_num_iters, ref_rolledover_mt, queue_account_data: output_queue_account_data.to_vec(), ref_queue_account: new_ref_output_queue_account, diff --git a/program-tests/system-cpi-test/tests/test.rs b/program-tests/system-cpi-test/tests/test.rs index 49328bd54a..9ac926e2cf 100644 --- a/program-tests/system-cpi-test/tests/test.rs +++ b/program-tests/system-cpi-test/tests/test.rs @@ -760,7 +760,6 @@ async fn only_test_create_pda() { assert_rpc_error( result, 0, - // UtilsError::AccountNotMutable.into(), UtilsError::InvalidDiscriminator.into(), ) .unwrap();