diff --git a/custom-hooks/README.md b/custom-hooks/README.md new file mode 100644 index 000000000..5ab533106 --- /dev/null +++ b/custom-hooks/README.md @@ -0,0 +1,4 @@ +## Hook Installation + +To install the hooks in this folder, from the root directory navigate to `./.git/hooks`. Then replace the corresponding file with the one in this folder. If you haven't installed any hooks before, the existing file will be named `pre-commit.sample` instead of just `pre-commit`. You may also +have to make the files inside `./.git/hooks` executable by running `chmod +x ./.git/hooks/*` from the project root. \ No newline at end of file diff --git a/custom-hooks/pre-commit b/custom-hooks/pre-commit new file mode 100644 index 000000000..64e149465 --- /dev/null +++ b/custom-hooks/pre-commit @@ -0,0 +1,22 @@ +#!/bin/sh + +set -eu + +if cargo fmt --all -- --check +then + echo "cargo fmt OK" +else + echo "There are some code style issues." + echo "Run cargo fmt first." + exit 1 +fi + +if cargo clippy --all-targets -- -D warnings +then + echo "cargo clippy OK" +else + echo "There are some clippy issues." 
+ exit 1 +fi + +exit 0 \ No newline at end of file diff --git a/groth16-framework/tests/common/mod.rs b/groth16-framework/tests/common/mod.rs index ae0f872fd..2202baaca 100644 --- a/groth16-framework/tests/common/mod.rs +++ b/groth16-framework/tests/common/mod.rs @@ -11,4 +11,4 @@ pub(crate) use context::TestContext; pub(crate) use io::{TestQueryInput, TestQueryOutput}; pub(crate) const NUM_PREPROCESSING_IO: usize = verifiable_db::ivc::NUM_IO; -pub(crate) const NUM_QUERY_IO: usize = verifiable_db::query::PI_LEN::; +pub(crate) const NUM_QUERY_IO: usize = verifiable_db::query::pi_len::(); diff --git a/groth16-framework/tests/common/query.rs b/groth16-framework/tests/common/query.rs index 5f5f80723..4fef29965 100644 --- a/groth16-framework/tests/common/query.rs +++ b/groth16-framework/tests/common/query.rs @@ -12,11 +12,9 @@ use mp2_common::{ }; use plonky2::field::types::PrimeField64; use verifiable_db::{ - query::api::CircuitInput as QueryInput, revelation::{api::CircuitInput, PublicInputs as RevelationPI}, test_utils::{ - TestRevelationData, MAX_NUM_COLUMNS, MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS, - MAX_NUM_PLACEHOLDERS, MAX_NUM_PREDICATE_OPS, MAX_NUM_RESULT_OPS, + TestRevelationData, MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS, MAX_NUM_PLACEHOLDERS, }, }; diff --git a/groth16-framework/tests/query.rs b/groth16-framework/tests/query.rs index 5921d782d..f9964b99a 100644 --- a/groth16-framework/tests/query.rs +++ b/groth16-framework/tests/query.rs @@ -1,5 +1,5 @@ //! Test the Groth16 proving process for the query circuits. - +#![allow(incomplete_features)] #![feature(generic_const_exprs)] mod common; diff --git a/mp2-common/src/lib.rs b/mp2-common/src/lib.rs index f22640f16..49f75be1a 100644 --- a/mp2-common/src/lib.rs +++ b/mp2-common/src/lib.rs @@ -1,5 +1,5 @@ //! 
Utility functions and gadgets - +#![allow(incomplete_features)] #![feature(generic_const_exprs)] #![feature(generic_arg_infer)] #![feature(const_for)] diff --git a/mp2-common/src/rlp.rs b/mp2-common/src/rlp.rs index 5784eb2a1..741f9e38e 100644 --- a/mp2-common/src/rlp.rs +++ b/mp2-common/src/rlp.rs @@ -17,6 +17,7 @@ const MAX_LEN_BYTES: usize = 2; /// Maximum size a key can have inside a MPT node. /// 33 bytes because key is compacted encoded, so it can add up to 1 byte more. const MAX_ENC_KEY_LEN: usize = 33; + /// Simply the maximum number of nibbles a key can have. pub const MAX_KEY_NIBBLE_LEN: usize = 64; diff --git a/mp2-common/src/types.rs b/mp2-common/src/types.rs index f340efdd3..383cf6110 100644 --- a/mp2-common/src/types.rs +++ b/mp2-common/src/types.rs @@ -58,7 +58,7 @@ pub type PackedMappingKeyTarget = Array; /// Regular hash output function - it can be generated from field elements using /// poseidon with the output serialized or via regular hash functions. -#[derive(Clone, Hash, Default, Debug, Serialize, Deserialize, Deref, PartialEq, Eq)] +#[derive(Clone, Hash, Default, Debug, Serialize, Deserialize, Deref, PartialEq, Eq, Copy)] pub struct HashOutput(pub [u8; 32]); impl AsRef<[u8]> for &HashOutput { fn as_ref(&self) -> &[u8] { diff --git a/mp2-v1/src/final_extraction/api.rs b/mp2-v1/src/final_extraction/api.rs index d5093f5e7..ef152d684 100644 --- a/mp2-v1/src/final_extraction/api.rs +++ b/mp2-v1/src/final_extraction/api.rs @@ -245,9 +245,7 @@ mod tests { use crate::{ final_extraction::{ - base_circuit::{ - test::ProofsPi, BLOCK_SET_NUM_IO, CONTRACT_SET_NUM_IO, VALUE_SET_NUM_IO, - }, + base_circuit::{test::ProofsPi, CONTRACT_SET_NUM_IO, VALUE_SET_NUM_IO}, lengthed_circuit::LENGTH_SET_NUM_IO, }, length_extraction, @@ -255,6 +253,9 @@ mod tests { use super::{CircuitInput, PublicParameters}; + pub(crate) const BLOCK_SET_NUM_IO: usize = + crate::block_extraction::public_inputs::PublicInputs::::TOTAL_LEN; + #[test] fn test_final_extraction_api() { let 
block_circuit = TestDummyCircuit::::build(); diff --git a/mp2-v1/src/final_extraction/base_circuit.rs b/mp2-v1/src/final_extraction/base_circuit.rs index d2bc6ff44..a2b164a86 100644 --- a/mp2-v1/src/final_extraction/base_circuit.rs +++ b/mp2-v1/src/final_extraction/base_circuit.rs @@ -113,9 +113,6 @@ pub(crate) struct BaseCircuitProofWires { pub(crate) const CONTRACT_SET_NUM_IO: usize = contract_extraction::PublicInputs::::TOTAL_LEN; pub(crate) const VALUE_SET_NUM_IO: usize = values_extraction::PublicInputs::::TOTAL_LEN; -// WARN: clippy is wrong on this one, it is used somewhere else. -pub(crate) const BLOCK_SET_NUM_IO: usize = - block_extraction::public_inputs::PublicInputs::::TOTAL_LEN; #[derive(Clone, Debug)] pub struct BaseCircuitInput { @@ -430,7 +427,7 @@ pub(crate) mod test { pub(crate) fn random() -> Self { let value_h = HashOut::::rand().to_bytes().pack(Endianness::Little); let key = random_vector(64); - let ptr = usize::max_value(); + let ptr = usize::MAX; let value_dv = Point::rand(); let value_dm = Point::rand(); let n = 10; diff --git a/mp2-v1/src/final_extraction/merge_circuit.rs b/mp2-v1/src/final_extraction/merge_circuit.rs index d894bd449..962c601ca 100644 --- a/mp2-v1/src/final_extraction/merge_circuit.rs +++ b/mp2-v1/src/final_extraction/merge_circuit.rs @@ -45,7 +45,7 @@ pub struct MergeTableWires { } impl MergeTable { - pub fn build<'a>( + pub fn build( b: &mut CBuilder, block_pi: &[Target], contract_pi: &[Target], @@ -125,12 +125,6 @@ pub(crate) struct MergeCircuitInput { pub(crate) merge: MergeTable, } -impl MergeCircuitInput { - pub(crate) fn new(base: BaseCircuitProofInputs, merge: MergeTable) -> Self { - Self { base, merge } - } -} - impl CircuitLogicWires for MergeTableRecursiveWires { type CircuitBuilderParams = FinalExtractionBuilderParams; @@ -170,16 +164,10 @@ mod test { use super::*; use base_circuit::test::{ProofsPi, ProofsPiTarget}; use mp2_common::{ - digest::SplitDigestPoint, - group_hashing::{field_hashed_scalar_mul, 
weierstrass_to_point as wp}, - utils::ToFields, - C, D, F, + digest::SplitDigestPoint, group_hashing::weierstrass_to_point as wp, C, D, F, }; use mp2_test::circuit::{run_circuit, UserCircuit}; - use plonky2::{ - field::types::Sample, - iop::witness::{PartialWitness, WitnessWrite}, - }; + use plonky2::iop::witness::WitnessWrite; use super::MergeTableWires; @@ -221,10 +209,6 @@ mod test { } } - fn random_field_vector(n: usize) -> Vec { - (0..n).map(|_| F::rand()).collect() - } - #[test] fn test_final_merge_circuit() { let pis_a = ProofsPi::random(); diff --git a/mp2-v1/src/indexing/mod.rs b/mp2-v1/src/indexing/mod.rs index 28b3d10cf..90de676e0 100644 --- a/mp2-v1/src/indexing/mod.rs +++ b/mp2-v1/src/indexing/mod.rs @@ -25,7 +25,7 @@ impl LagrangeNode for RowPayload { } fn hash(&self) -> HashOutput { - self.hash.clone() + self.hash } fn min(&self) -> U256 { @@ -37,7 +37,7 @@ impl LagrangeNode for RowPayload { } fn embedded_hash(&self) -> HashOutput { - self.cell_root_hash.clone().unwrap() + self.cell_root_hash.unwrap() } } @@ -47,7 +47,7 @@ impl LagrangeNode for IndexNode { } fn hash(&self) -> HashOutput { - self.node_hash.clone() + self.node_hash } fn min(&self) -> U256 { @@ -59,6 +59,6 @@ impl LagrangeNode for IndexNode { } fn embedded_hash(&self) -> HashOutput { - self.row_tree_hash.clone() + self.row_tree_hash } } diff --git a/mp2-v1/src/lib.rs b/mp2-v1/src/lib.rs index 99dc7dc03..3e9cb8414 100644 --- a/mp2-v1/src/lib.rs +++ b/mp2-v1/src/lib.rs @@ -1,5 +1,6 @@ //! Circuits for v1 of Lagrange Proof Network (LPN) #![allow(incomplete_features)] +#![allow(clippy::large_enum_variant)] // Add this to allow generic const expressions, e.g. `PAD_LEN(NODE_LEN)`. 
#![feature(generic_const_exprs)] // Add this so we don't need to always specify const generic in generic diff --git a/mp2-v1/src/query/planner.rs b/mp2-v1/src/query/planner.rs index c62c4889e..96ff1e982 100644 --- a/mp2-v1/src/query/planner.rs +++ b/mp2-v1/src/query/planner.rs @@ -59,7 +59,7 @@ where C: ContextProvider, { let (query_for_min, query_for_max) = - bracket_secondary_index(&table_name, settings, primary as Epoch, &bounds); + bracket_secondary_index(&table_name, settings, primary as Epoch, bounds); // try first with lower node than secondary min query bound let to_be_proven_node = @@ -108,13 +108,12 @@ async fn get_successor_node_with_same_value( successor_ctx = row_tree .node_context_at(successor_ctx.left.as_ref().unwrap(), primary as Epoch) .await - .expect( - format!( + .unwrap_or_else(|| { + panic!( "Node context not found for left child of node {:?}", successor_ctx.node_id ) - .as_str(), - ); + }); } Some(successor_ctx) } else { @@ -192,13 +191,12 @@ async fn get_predecessor_node_with_same_value( predecessor_ctx = row_tree .node_context_at(predecessor_ctx.right.as_ref().unwrap(), primary as Epoch) .await - .expect( - format!( + .unwrap_or_else(|| { + panic!( "Node context not found for right child of node {:?}", predecessor_ctx.node_id ) - .as_str(), - ); + }); } Some(predecessor_ctx) } else { @@ -299,11 +297,11 @@ async fn find_node_for_proof( // from the value `value` stored in the node with key `row_key`; the node found is the one to be // employed to generate the non-existence proof let mut successor_ctx = - get_successor_node_with_same_value(&row_tree, &node_ctx, value, primary).await; + get_successor_node_with_same_value(row_tree, &node_ctx, value, primary).await; while successor_ctx.is_some() { node_ctx = successor_ctx.unwrap(); successor_ctx = - get_successor_node_with_same_value(&row_tree, &node_ctx, value, primary).await; + get_successor_node_with_same_value(row_tree, &node_ctx, value, primary).await; } } else { // starting from the node 
with key `row_key`, we iterate over its predecessor nodes in the tree, @@ -311,11 +309,11 @@ async fn find_node_for_proof( // from the value `value` stored in the node with key `row_key`; the node found is the one to be // employed to generate the non-existence proof let mut predecessor_ctx = - get_predecessor_node_with_same_value(&row_tree, &node_ctx, value, primary).await; + get_predecessor_node_with_same_value(row_tree, &node_ctx, value, primary).await; while predecessor_ctx.is_some() { node_ctx = predecessor_ctx.unwrap(); predecessor_ctx = - get_predecessor_node_with_same_value(&row_tree, &node_ctx, value, primary).await; + get_predecessor_node_with_same_value(row_tree, &node_ctx, value, primary).await; } } diff --git a/mp2-v1/src/values_extraction/api.rs b/mp2-v1/src/values_extraction/api.rs index cbd810010..a1bcaa6a8 100644 --- a/mp2-v1/src/values_extraction/api.rs +++ b/mp2-v1/src/values_extraction/api.rs @@ -133,6 +133,20 @@ pub fn generate_proof( circuit_params.generate_proof(circuit_type)?.serialize() } +pub trait BranchMacroTrait { + fn new(builder: &CircuitWithUniversalVerifierBuilder) -> Self; + + fn circuit_set(&self) -> Vec>; + + fn generate_proof( + &self, + set: &RecursiveCircuits, + branch_node: InputNode, + child_proofs: Vec, + is_simple_aggregation: bool, + ) -> Result; +} + /// generate a macro filling the BranchCircuit structs manually macro_rules! impl_branch_circuits { ($struct_name:ty, $($i:expr),*) => { @@ -152,7 +166,7 @@ macro_rules! impl_branch_circuits { in combination with the node input length."] pub type $struct_name = [< $struct_name GenericNodeLen>]; - impl $struct_name { + impl BranchMacroTrait for $struct_name { fn new(builder: &CircuitWithUniversalVerifierBuilder) -> Self { $struct_name { $( @@ -163,11 +177,11 @@ macro_rules! 
impl_branch_circuits { } /// Returns the set of circuits to be fed to the recursive framework fn circuit_set(&self) -> Vec> { - let mut arr = Vec::new(); - $( - arr.push(self.[< b $i >].circuit_data().verifier_only.circuit_digest); - )+ - arr + + vec![$( + self.[< b $i >].circuit_data().verifier_only.circuit_digest, + )+] + } /// generates a proof from the inputs stored in `branch`. Depending on the size of the node, /// and the number of children proofs, it selects the right specialized circuit to generate the proof. @@ -413,12 +427,6 @@ mod tests { mapping_key: Option>, } - impl TestData { - fn is_simple_slot(&self) -> bool { - self.mapping_key.is_none() - } - } - #[test] fn test_values_extraction_single_variable_apis() { test_apis(true); diff --git a/mp2-v1/tests/common/bindings/mod.rs b/mp2-v1/tests/common/bindings/mod.rs index 81d6a1873..c8e26af41 100644 --- a/mp2-v1/tests/common/bindings/mod.rs +++ b/mp2-v1/tests/common/bindings/mod.rs @@ -1,4 +1,4 @@ -#![allow(unused_imports, clippy::all, rustdoc::all)] +#![allow(unused_imports, clippy::all, rustdoc::all, warnings)] //! This module contains the sol! generated bindings for solidity contracts. //! This is autogenerated code. //! Do not manually edit these files. diff --git a/mp2-v1/tests/common/cases/indexing.rs b/mp2-v1/tests/common/cases/indexing.rs index b25aa76d7..41bdd24db 100644 --- a/mp2-v1/tests/common/cases/indexing.rs +++ b/mp2-v1/tests/common/cases/indexing.rs @@ -119,17 +119,17 @@ impl TableIndexing { let single_columns = SINGLE_SLOTS .iter() .enumerate() - .filter_map(|(i, slot)| { + .map(|(i, slot)| { let identifier = identifier_single_var_column(*slot, contract_address, chain_id, vec![]); - Some(TableColumn { + TableColumn { name: format!("column_{}", i), identifier, index: IndexType::None, // ALL single columns are "multiplier" since we do tableA * D(tableB), i.e. all // entries of table A are repeated for each entry of table B. 
multiplier: true, - }) + } }) .collect::>(); let mapping_column = vec![TableColumn { @@ -591,10 +591,9 @@ impl TableIndexing { debug!( " CONTRACT storage root pis.storage_root() {:?}", hex::encode( - &pis.root_hash_field() + pis.root_hash_field() .into_iter() - .map(|u| u.to_be_bytes()) - .flatten() + .flat_map(|u| u.to_be_bytes()) .collect::>() ) ); diff --git a/mp2-v1/tests/common/cases/planner.rs b/mp2-v1/tests/common/cases/planner.rs index 6fccfdf8d..7b9bf58b4 100644 --- a/mp2-v1/tests/common/cases/planner.rs +++ b/mp2-v1/tests/common/cases/planner.rs @@ -53,9 +53,9 @@ pub trait TreeInfo { proof: Vec, ) -> Result<()>; - async fn load_or_prove_embedded<'a>( + async fn load_or_prove_embedded( &self, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, k: &K, v: &V, @@ -116,9 +116,9 @@ impl TreeInfo> ctx.storage.store_proof(proof_key, proof) } - async fn load_or_prove_embedded<'a>( + async fn load_or_prove_embedded( &self, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, k: &RowTreeKey, _v: &RowPayload, @@ -166,7 +166,7 @@ impl<'a> RowInfo<'a> { } } -impl<'b> TreeInfo> for RowInfo<'b> { +impl TreeInfo> for RowInfo<'_> { fn is_row_tree(&self) -> bool { true } @@ -210,9 +210,9 @@ impl<'b> TreeInfo> for RowInfo<'b> { ctx.storage.store_proof(proof_key, proof) } - async fn load_or_prove_embedded<'a>( + async fn load_or_prove_embedded( &self, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, k: &RowTreeKey, _v: &RowPayload, @@ -284,9 +284,9 @@ impl TreeInfo> ctx.storage.store_proof(proof_key, proof) } - async fn load_or_prove_embedded<'a>( + async fn load_or_prove_embedded( &self, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, k: &BlockPrimaryIndex, v: &IndexNode, @@ -318,7 +318,7 @@ impl<'a> IndexInfo<'a> { } } -impl<'b> TreeInfo> for IndexInfo<'b> { +impl TreeInfo> for IndexInfo<'_> { 
fn is_row_tree(&self) -> bool { false } @@ -355,9 +355,9 @@ impl<'b> TreeInfo> for IndexInfo ctx.storage.store_proof(proof_key, proof) } - async fn load_or_prove_embedded<'a>( + async fn load_or_prove_embedded( &self, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, k: &BlockPrimaryIndex, v: &IndexNode, @@ -375,11 +375,10 @@ impl<'b> TreeInfo> for IndexInfo } async fn load_or_prove_embedded_index< - 'a, T: TreeInfo>, >( info: &T, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, k: &BlockPrimaryIndex, v: &IndexNode, diff --git a/mp2-v1/tests/common/cases/query/aggregated_queries.rs b/mp2-v1/tests/common/cases/query/aggregated_queries.rs index 3757673f3..9234fdd3f 100644 --- a/mp2-v1/tests/common/cases/query/aggregated_queries.rs +++ b/mp2-v1/tests/common/cases/query/aggregated_queries.rs @@ -47,14 +47,13 @@ use mp2_v1::{ }; use parsil::{ assembler::{DynamicCircuitPis, StaticCircuitPis}, - parse_and_validate, queries::{core_keys_for_index_tree, core_keys_for_row_tree}, ParsilSettings, DEFAULT_MAX_BLOCK_PLACEHOLDER, DEFAULT_MIN_BLOCK_PLACEHOLDER, }; use ryhope::{ storage::{ updatetree::{Next, UpdateTree, WorkplanItem}, - EpochKvStorage, RoEpochKvStorage, TreeTransactionalStorage, WideLineage, + EpochKvStorage, RoEpochKvStorage, TreeTransactionalStorage, }, tree::NodeContext, Epoch, NodePayload, @@ -71,20 +70,19 @@ use verifiable_db::{ }, }, revelation::PublicInputs, - row_tree, }; use super::{ - GlobalCircuitInput, QueryCircuitInput, RevelationCircuitInput, INDEX_TREE_MAX_DEPTH, - MAX_NUM_COLUMNS, MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS, MAX_NUM_PLACEHOLDERS, - MAX_NUM_PREDICATE_OPS, MAX_NUM_RESULT_OPS, ROW_TREE_MAX_DEPTH, + GlobalCircuitInput, QueryCircuitInput, RevelationCircuitInput, MAX_NUM_COLUMNS, + MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS, MAX_NUM_PLACEHOLDERS, MAX_NUM_PREDICATE_OPS, + MAX_NUM_RESULT_OPS, }; pub type RevelationPublicInputs<'a> = PublicInputs<'a, F, 
MAX_NUM_OUTPUTS, MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_PLACEHOLDERS>; /// Execute a query to know all the touched rows, and then call the universal circuit on all rows -#[warn(clippy::too_many_arguments)] +#[allow(clippy::too_many_arguments)] pub(crate) async fn prove_query( ctx: &mut TestContext, table: &Table, @@ -99,7 +97,7 @@ pub(crate) async fn prove_query( .row .wide_lineage_between( table.row.current_epoch(), - &core_keys_for_row_tree(&query.query, &settings, &pis.bounds, &query.placeholders)?, + &core_keys_for_row_tree(&query.query, settings, &pis.bounds, &query.placeholders)?, (query.min_block as Epoch, query.max_block as Epoch), ) .await?; @@ -215,9 +213,9 @@ pub(crate) async fn prove_query( // get `StaticPublicInputs`, i.e., the data about the query available only at query registration time, // to check the public inputs - let pis = parsil::assembler::assemble_static(&parsed, &settings)?; + let pis = parsil::assembler::assemble_static(&parsed, settings)?; // get number of matching rows - let mut exec_query = parsil::executor::generate_query_keys(&mut parsed, &settings)?; + let mut exec_query = parsil::executor::generate_query_keys(&mut parsed, settings)?; let query_params = exec_query.convert_placeholders(&query.placeholders); let num_touched_rows = execute_row_query( &table.db_pool, @@ -281,7 +279,7 @@ async fn prove_revelation( Ok(proof) } -#[warn(clippy::too_many_arguments)] +#[allow(clippy::too_many_arguments)] pub(crate) fn check_final_outputs( revelation_proof: Vec, ctx: &TestContext, @@ -759,8 +757,8 @@ pub fn generate_non_existence_proof( } /// Generate a proof for a node of the index tree which is outside of the query bounds -async fn prove_non_existence_index<'a>( - planner: &mut QueryPlanner<'a>, +async fn prove_non_existence_index( + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, ) -> Result<()> { let tree = &planner.table.index; @@ -794,8 +792,8 @@ async fn prove_non_existence_index<'a>( Ok(()) } -pub async fn 
prove_non_existence_row<'a>( - planner: &mut QueryPlanner<'a>, +pub async fn prove_non_existence_row( + planner: &mut QueryPlanner<'_>, primary: BlockPrimaryIndex, ) -> Result<()> { let (chosen_node, plan) = find_row_node_for_non_existence( @@ -803,7 +801,7 @@ pub async fn prove_non_existence_row<'a>( planner.table.public_name.clone(), &planner.table.db_pool, primary, - &planner.settings, + planner.settings, &planner.pis.bounds, ) .await?; @@ -1349,7 +1347,6 @@ async fn check_correct_cells_tree( let local_cells = all_cells.to_vec(); let expected_cells_root = payload .cell_root_hash - .clone() .unwrap_or(HashOutput::from(*empty_poseidon_hash())); let mut tree = indexing::cell::new_tree().await; tree.in_transaction(|t| { diff --git a/mp2-v1/tests/common/cases/query/mod.rs b/mp2-v1/tests/common/cases/query/mod.rs index 208504d0b..9e40f47d5 100644 --- a/mp2-v1/tests/common/cases/query/mod.rs +++ b/mp2-v1/tests/common/cases/query/mod.rs @@ -205,13 +205,13 @@ async fn test_query_mapping( parsed, &settings, res, - table_hash.clone(), + *table_hash, pis, ) .await } Output::NoAggregation => { - prove_no_aggregation_query(parsed, &table_hash, &mut planner, res).await + prove_no_aggregation_query(parsed, table_hash, &mut planner, res).await } } } @@ -223,9 +223,7 @@ pub enum SqlType { impl SqlType { pub fn extract(&self, row: &PsqlRow, idx: usize) -> Option { match self { - SqlType::Numeric => row - .get::<_, Option>(idx) - .map(|num| SqlReturn::Numeric(num)), + SqlType::Numeric => row.get::<_, Option>(idx).map(SqlReturn::Numeric), } } } @@ -236,11 +234,11 @@ pub enum SqlReturn { } fn is_empty_result(rows: &[PsqlRow], types: SqlType) -> bool { - if rows.len() == 0 { + if rows.is_empty() { return true; } let columns = rows.first().as_ref().unwrap().columns(); - if columns.len() == 0 { + if columns.is_empty() { return true; } for row in rows { @@ -252,7 +250,7 @@ fn is_empty_result(rows: &[PsqlRow], types: SqlType) -> bool { } fn print_vec_sql_rows(rows: &[PsqlRow], types: 
SqlType) { - if rows.len() == 0 { + if rows.is_empty() { println!("no rows returned"); return; } diff --git a/mp2-v1/tests/common/cases/query/simple_select_queries.rs b/mp2-v1/tests/common/cases/query/simple_select_queries.rs index a18adfc58..e29226a8b 100644 --- a/mp2-v1/tests/common/cases/query/simple_select_queries.rs +++ b/mp2-v1/tests/common/cases/query/simple_select_queries.rs @@ -47,13 +47,13 @@ use crate::common::{ use super::QueryCooking; -pub(crate) async fn prove_query<'a>( +pub(crate) async fn prove_query( mut parsed: Query, table_hash: &MetadataHash, - planner: &mut QueryPlanner<'a>, + planner: &mut QueryPlanner<'_>, results: Vec, ) -> Result<()> { - let mut exec_query = generate_query_execution_with_keys(&mut parsed, &planner.settings)?; + let mut exec_query = generate_query_execution_with_keys(&mut parsed, planner.settings)?; let query_params = exec_query.convert_placeholders(&planner.query.placeholders); let res = execute_row_query( &planner.table.db_pool, @@ -71,7 +71,7 @@ pub(crate) async fn prove_query<'a>( // all the other items are query results let result = (2..row.len()) .filter_map(|i| { - SqlType::Numeric.extract(&row, i).map(|res| match res { + SqlType::Numeric.extract(row, i).map(|res| match res { SqlReturn::Numeric(uint) => uint, }) }) @@ -101,7 +101,7 @@ pub(crate) async fn prove_query<'a>( &planner.columns, epoch as BlockPrimaryIndex, &key, - &planner.pis, + planner.pis, &planner.query, ) .await?; @@ -150,14 +150,14 @@ pub(crate) async fn prove_query<'a>( let pis = parsil::assembler::assemble_static(&parsed, planner.settings)?; check_final_outputs( final_proof, - &planner.ctx, - &planner.table, + planner.ctx, + planner.table, &planner.query, &pis, current_epoch, num_matching_rows, results, - table_hash.clone(), + *table_hash, )?; info!("Revelation done!"); Ok(()) diff --git a/mp2-v1/tests/common/cases/table_source.rs b/mp2-v1/tests/common/cases/table_source.rs index fdb742326..01f0497d6 100644 --- 
a/mp2-v1/tests/common/cases/table_source.rs +++ b/mp2-v1/tests/common/cases/table_source.rs @@ -428,10 +428,9 @@ impl SingleValuesExtractionArgs { debug!( "[--] SINGLE FINAL ROOT HASH --> {:?} ", hex::encode( - &pi.root_hash() + pi.root_hash() .into_iter() - .map(|u| u.to_be_bytes()) - .flatten() + .flat_map(|u| u.to_be_bytes()) .collect::>() ) ); @@ -671,10 +670,9 @@ impl MappingValuesExtractionArgs { debug!( "[--] MAPPING FINAL ROOT HASH --> {:?} ", hex::encode( - &pi.root_hash() + pi.root_hash() .into_iter() - .map(|u| u.to_be_bytes()) - .flatten() + .flat_map(|u| u.to_be_bytes()) .collect::>() ) ); @@ -815,7 +813,7 @@ impl MergeSource { // now we merge all the cells change from the single contract to the mapping contract update_mapping .into_iter() - .map(|um| { + .flat_map(|um| { let refm = &um; // for each update from mapping, we "merge" all the updates from single, i.e. since // single is the multiplier table @@ -855,9 +853,7 @@ impl MergeSource { TableRowUpdate::Insertion(cella,seca.clone()) } }).collect::>() - }) - .flatten() - .collect() + }).collect() } pub async fn random_contract_update( @@ -888,7 +884,7 @@ impl MergeSource { .query_mpt_proof(&query, BlockNumberOrTag::Number(ctx.block_number().await)) .await; let current_value = response.storage_proof[0].value; - let current_key = U256::from_be_slice(&mk); + let current_key = U256::from_be_slice(mk); let entry = UniqueMappingEntry::new(¤t_key, ¤t_value); // create one update for each update of the first table (note again there // should be only one update since it's single var) diff --git a/mp2-v1/tests/common/celltree.rs b/mp2-v1/tests/common/celltree.rs index 068cbdd99..9f5019526 100644 --- a/mp2-v1/tests/common/celltree.rs +++ b/mp2-v1/tests/common/celltree.rs @@ -54,7 +54,7 @@ impl TestContext { while let Some(Next::Ready(wk)) = workplan.next() { let k = wk.k(); - let (context, cell) = tree.fetch_with_context(&k).await; + let (context, cell) = tree.fetch_with_context(k).await; let column = 
table.columns.column_info(cell.identifier()); let proof = if context.is_leaf() { debug!( diff --git a/mp2-v1/tests/common/rowtree.rs b/mp2-v1/tests/common/rowtree.rs index 039cc829e..cdd02e4f9 100644 --- a/mp2-v1/tests/common/rowtree.rs +++ b/mp2-v1/tests/common/rowtree.rs @@ -118,7 +118,7 @@ impl TestContext { let cell_root_hash_from_proof = cells_tree::extract_hash_from_proof(&cell_tree_proof) .unwrap() .to_bytes(); - let cell_root_hash_from_row = row.cell_root_hash.clone(); + let cell_root_hash_from_row = row.cell_root_hash; assert_eq!( hex::encode(cell_root_hash_from_proof.clone()), hex::encode(cell_root_hash_from_row.unwrap().0), @@ -305,7 +305,7 @@ impl TestContext { .get_proof_exact(&ProofKey::Row(root_proof_key.clone())) .unwrap(); let root_row = table.row.root_data().await.unwrap(); - let tree_hash = root_row.hash.clone(); + let tree_hash = root_row.hash; let proved_hash = row_tree_proof_to_hash(&row_tree_proof); assert_eq!( diff --git a/mp2-v1/tests/common/table.rs b/mp2-v1/tests/common/table.rs index 5420dd31e..19a72f30e 100644 --- a/mp2-v1/tests/common/table.rs +++ b/mp2-v1/tests/common/table.rs @@ -6,7 +6,7 @@ use futures::{ FutureExt, }; use itertools::Itertools; -use log::{debug, info}; +use log::debug; use mp2_v1::indexing::{ block::{BlockPrimaryIndex, BlockTreeKey}, cell::{self, Cell, CellTreeKey, MerkleCell, MerkleCellTree}, @@ -87,7 +87,7 @@ impl TableColumns { .iter() .chain(once(&self.secondary)) .find(|c| c.identifier == identifier) - .expect(&format!("can't find cell from identifier {}", identifier)) + .unwrap_or_else(|| panic!("can't find cell from identifier {}", identifier)) .clone() } pub fn ordered_cells( @@ -459,7 +459,7 @@ impl Table { .index .in_transaction(|t| { async move { - t.store(updates.added_index.0 as usize, updates.added_index.1) + t.store(updates.added_index.0, updates.added_index.1) .await?; Ok(()) } diff --git a/mp2-v1/tests/integrated_tests.rs b/mp2-v1/tests/integrated_tests.rs index 3b70c4695..8ce01bcb4 100644 --- 
a/mp2-v1/tests/integrated_tests.rs +++ b/mp2-v1/tests/integrated_tests.rs @@ -20,9 +20,9 @@ use common::{ cases::{ indexing::{ChangeType, UpdateType}, query::{ - test_query, GlobalCircuitInput, QueryCircuitInput, RevelationCircuitInput, - MAX_NUM_COLUMNS, MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS, MAX_NUM_PLACEHOLDERS, - MAX_NUM_PREDICATE_OPS, MAX_NUM_RESULT_OPS, + test_query, GlobalCircuitInput, RevelationCircuitInput, MAX_NUM_COLUMNS, + MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS, MAX_NUM_PLACEHOLDERS, MAX_NUM_PREDICATE_OPS, + MAX_NUM_RESULT_OPS, }, TableIndexing, }, diff --git a/parsil/src/executor.rs b/parsil/src/executor.rs index c7fda830f..90b13dcfe 100644 --- a/parsil/src/executor.rs +++ b/parsil/src/executor.rs @@ -692,19 +692,19 @@ impl<'a, C: ContextProvider> ExecutorWithKey<'a, C> { } } -impl<'a, C: ContextProvider> AstMutator for ExecutorWithKey<'a, C> { +impl AstMutator for ExecutorWithKey<'_, C> { type Error = anyhow::Error; fn post_expr(&mut self, expr: &mut Expr) -> Result<()> { let mut executor = Executor { - settings: &mut self.settings, + settings: self.settings, }; executor.post_expr(expr) } fn post_table_factor(&mut self, table_factor: &mut TableFactor) -> Result<()> { let mut key_fetcher = KeyFetcher { - settings: &mut self.settings, + settings: self.settings, }; key_fetcher.post_table_factor(table_factor) } @@ -779,7 +779,7 @@ impl<'a, C: ContextProvider> AstMutator for ExecutorWithKey<'a, C> { match item { SelectItem::Wildcard(_) => replace_wildcard() .into_iter() - .map(|expr| SelectItem::UnnamedExpr(expr)) + .map(SelectItem::UnnamedExpr) .collect(), _ => vec![item.clone()], } diff --git a/parsil/src/expand.rs b/parsil/src/expand.rs index 8d063bb97..cf8a923fd 100644 --- a/parsil/src/expand.rs +++ b/parsil/src/expand.rs @@ -18,7 +18,7 @@ struct Expander<'a, C: ContextProvider> { settings: &'a ParsilSettings, } -impl<'a, C: ContextProvider> AstMutator for Expander<'a, C> { +impl AstMutator for Expander<'_, C> { type Error = anyhow::Error; fn 
pre_expr(&mut self, e: &mut Expr) -> anyhow::Result<()> { diff --git a/parsil/src/symbols.rs b/parsil/src/symbols.rs index ac552715d..52c6950b1 100644 --- a/parsil/src/symbols.rs +++ b/parsil/src/symbols.rs @@ -369,6 +369,12 @@ pub struct ScopeTable, } +impl Default for ScopeTable { + fn default() -> Self { + Self::new() + } +} + impl ScopeTable { pub fn new() -> Self { ScopeTable { diff --git a/parsil/src/tests.rs b/parsil/src/tests.rs index 6b574f92b..3cda18d21 100644 --- a/parsil/src/tests.rs +++ b/parsil/src/tests.rs @@ -1,3 +1,4 @@ +#![allow(clippy::single_element_loop)] use crate::assembler::{assemble_dynamic, DynamicCircuitPis}; use crate::isolator; use crate::utils::ParsilSettingsBuilder; @@ -10,6 +11,8 @@ use anyhow::Result; use verifiable_db::query::universal_circuit::universal_circuit_inputs::Placeholders; /// NOTE: queries that may bother us in the future +// CHORE: Remove this when relevant PR is merged +#[allow(dead_code)] const CAREFUL: &[&str] = &[ // What to do if b.t is longer than a.x? 
"SELECT x, (SELECT t AS tt FROM b) FROM a;", @@ -164,8 +167,8 @@ fn isolation() { .build() .unwrap(); - let mut query = parse_and_validate(q, &settings).unwrap(); - isolator::isolate_with(&mut query, &settings, lo_sec, hi_sec) + let query = parse_and_validate(q, &settings).unwrap(); + isolator::isolate_with(&query, &settings, lo_sec, hi_sec) .unwrap() .to_string() } diff --git a/parsil/src/utils.rs b/parsil/src/utils.rs index f328ace39..d31051eed 100644 --- a/parsil/src/utils.rs +++ b/parsil/src/utils.rs @@ -192,7 +192,7 @@ pub fn parse_and_validate( settings: &ParsilSettings, ) -> anyhow::Result { let mut query = parser::parse(query)?; - expand::expand(&settings, &mut query)?; + expand::expand(settings, &mut query)?; placeholders::validate(settings, &query)?; validate::validate(settings, &query)?; diff --git a/recursion-framework/src/circuit_builder.rs b/recursion-framework/src/circuit_builder.rs index 0ebb0668c..fc945e54b 100644 --- a/recursion-framework/src/circuit_builder.rs +++ b/recursion-framework/src/circuit_builder.rs @@ -4,7 +4,7 @@ use plonky2::{ plonk::{ circuit_builder::CircuitBuilder, circuit_data::{CircuitConfig, CircuitData, VerifierOnlyCircuitData}, - config::{AlgebraicHasher, GenericConfig, Hasher}, + config::{AlgebraicHasher, GenericConfig}, proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget}, }, }; @@ -55,7 +55,7 @@ pub trait CircuitLogicWires, const D: usize, const N /// This method, given a `ProofWithPublicInputsTarget` that should represent a proof generated with /// a `CircuitWithUniversalVerifier` circuit implementing the additional circuit logid specified by `Self`, /// returns the set of `Self::NUM_PUBLIC_INPUTS` targets corresponding to all the public inputs of the - /// proof except for the ones representing the digest of the circuit set + /// proof except for the ones representing the digest of the circuit set fn public_input_targets(proof: &ProofWithPublicInputsTarget) -> &[Target] where [(); Self::NUM_PUBLIC_INPUTS]:, @@ 
-90,7 +90,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> Self where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let verifier_builder = UniversalVerifierBuilder::new::(config.clone(), circuit_set_size); Self { @@ -114,7 +113,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> CircuitWithUniversalVerifier where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { self.build_circuit_internal(&self.config, input_parameters) } @@ -132,7 +130,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> CircuitWithUniversalVerifier where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { self.build_circuit_internal(&custom_config, input_parameters) } @@ -148,7 +145,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> CircuitWithUniversalVerifier where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let mut builder = CircuitBuilder::::new(config.clone()); let circuit_set_target = CircuitSetTarget::build_target(&mut builder); @@ -156,7 +152,7 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ..NUM_VERIFIERS) .map(|_| { self.verifier_builder - .universal_verifier_circuit(&mut builder, &circuit_set_target) + .universal_verifier_circuit::(&mut builder, &circuit_set_target) }) .collect::>() .try_into() @@ -274,7 +270,6 @@ impl< > CircuitWithUniversalVerifier where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { /// Generate a proof for this instance of a `CircuitWithUniversalVerifier, employing the provided inputs /// to compute the witness data necessary to generate the proof. 
More specifically: @@ -386,7 +381,6 @@ pub(crate) mod tests { } impl< - 'a, F: SerializableRichField, const D: usize, const NUM_VERIFIERS: usize, @@ -457,7 +451,6 @@ pub(crate) mod tests { } impl< - 'a, F: SerializableRichField, const D: usize, const NUM_VERIFIERS: usize, @@ -505,7 +498,6 @@ pub(crate) mod tests { config: Option, ) where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { const INPUT_SIZE: usize = 8; const NUM_PUBLIC_INPUTS: usize = NUM_PUBLIC_INPUTS_TEST_CIRCUITS; diff --git a/recursion-framework/src/framework.rs b/recursion-framework/src/framework.rs index b6f152a29..7225f73f7 100644 --- a/recursion-framework/src/framework.rs +++ b/recursion-framework/src/framework.rs @@ -5,7 +5,7 @@ use plonky2::{ plonk::{ circuit_builder::CircuitBuilder, circuit_data::{CircuitConfig, VerifierCircuitTarget, VerifierOnlyCircuitData}, - config::{AlgebraicHasher, GenericConfig, Hasher}, + config::{AlgebraicHasher, GenericConfig}, proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget}, }, }; @@ -42,7 +42,7 @@ where F: SerializableRichField, C: GenericConfig, C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, + CLW: CircuitLogicWires, { fn get_verifier_data(&self) -> &VerifierOnlyCircuitData { @@ -77,7 +77,6 @@ impl, C: GenericConfig + 'static, const D: RecursiveCircuits where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { /// Instantiate a `RecursiveCircuits` data structure employing the list of circuits provided as input pub fn new(circuits: Vec + '_>>) -> Self { @@ -163,7 +162,6 @@ impl RecursiveCircuitsVerifierTarget { ) -> Result<()> where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { self.0.set_target( pw, @@ -209,11 +207,10 @@ impl< pub fn new(config: CircuitConfig, recursive_circuits_set: &RecursiveCircuits) -> Self where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let circuit_set_size = recursive_circuits_set.circuit_set.circuit_set_size(); let gadget_builder = - 
UniversalVerifierBuilder::::new(config, circuit_set_size); + UniversalVerifierBuilder::::new::(config, circuit_set_size); Self { gadget_builder, recursive_circuits: recursive_circuits_set.clone(), @@ -228,7 +225,6 @@ impl< ) -> RecursiveCircuitsVerifierTarget where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let circuit_set_target = CircuitSetTarget::from_circuit_set_digest( builder, @@ -236,7 +232,7 @@ impl< ); RecursiveCircuitsVerifierTarget( self.gadget_builder - .universal_verifier_circuit(builder, &circuit_set_target), + .universal_verifier_circuit::(builder, &circuit_set_target), ) } @@ -249,7 +245,6 @@ impl< ) -> ProofWithPublicInputsTarget where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let circuit_set_target = CircuitSetTarget::from_circuit_set_digest( builder, @@ -328,7 +323,6 @@ pub(crate) mod tests { > CircuitLogicWires for VerifierCircuitWires where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { type CircuitBuilderParams = RecursiveCircuitsVerifierGagdet; @@ -377,7 +371,6 @@ pub(crate) mod tests { > CircuitLogicWires for VerifierCircuitFixedWires where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { type CircuitBuilderParams = ( RecursiveCircuitsVerifierGagdet, @@ -440,7 +433,6 @@ pub(crate) mod tests { > TestRecursiveCircuits where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { fn new() -> Self { const CIRCUIT_SET_SIZE: usize = 5; diff --git a/recursion-framework/src/framework_testing.rs b/recursion-framework/src/framework_testing.rs index 214fce4ec..a1e7a9dc2 100644 --- a/recursion-framework/src/framework_testing.rs +++ b/recursion-framework/src/framework_testing.rs @@ -7,7 +7,7 @@ use plonky2::{ plonk::{ circuit_builder::CircuitBuilder, circuit_data::{CircuitConfig, VerifierOnlyCircuitData}, - config::{AlgebraicHasher, GenericConfig, Hasher}, + config::{AlgebraicHasher, GenericConfig}, proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget}, }, }; @@ -96,7 +96,6 @@ pub 
fn new_universal_circuit_builder_for_testing< ) -> CircuitWithUniversalVerifierBuilder where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { CircuitWithUniversalVerifierBuilder::::new::( config, @@ -112,7 +111,6 @@ impl< > Default for TestingRecursiveCircuits where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { /// Build a `TestingRecursiveCircuits` for an empty set of circuits and employing /// `standard_recursion_config` as the circuit configuration @@ -132,7 +130,6 @@ impl< > TestingRecursiveCircuits where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { /// Instantiate a `TestingRecursiveCircuits` for the set of circuits given by `circuits`; `builder` must be instantiated /// by employing the `new_universal_circuit_builder_for_testing` utility function @@ -266,7 +263,6 @@ mod tests { >() where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { const INPUT_SIZE: usize = 8; let config = CircuitConfig::standard_recursion_config(); diff --git a/recursion-framework/src/lib.rs b/recursion-framework/src/lib.rs index 0aea2ae4a..9a3517677 100644 --- a/recursion-framework/src/lib.rs +++ b/recursion-framework/src/lib.rs @@ -1,4 +1,5 @@ // #![warn(missing_docs)] +#![allow(incomplete_features)] #![feature(generic_const_exprs)] //! 
This crate provides a framework to build circuits that needs to recursively verify proofs diff --git a/recursion-framework/src/universal_verifier_gadget/circuit_set.rs b/recursion-framework/src/universal_verifier_gadget/circuit_set.rs index 84e82a948..ee5a6c189 100644 --- a/recursion-framework/src/universal_verifier_gadget/circuit_set.rs +++ b/recursion-framework/src/universal_verifier_gadget/circuit_set.rs @@ -173,7 +173,6 @@ pub(crate) struct CircuitSet< impl, C: GenericConfig, const D: usize> CircuitSet where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { pub(crate) fn build_circuit_set(circuit_digests: Vec<>::Hash>) -> Self { let (circuit_digests_to_leaf_indexes, mut leaves): (HashMap, usize>, Vec<_>) = @@ -262,15 +261,13 @@ where impl, C: GenericConfig, const D: usize> Default for CircuitSetDigest -where - [(); C::Hasher::HASH_SIZE]:, { fn default() -> Self { Self(MerkleCap( (0..(1 << CIRCUIT_SET_CAP_HEIGHT)) .map(|_| { <>::Hasher as Hasher>::Hash::from_bytes( - &[0u8; <>::Hasher as Hasher>::HASH_SIZE], + vec![0u8; >::HASH_SIZE].as_slice(), ) }) .collect::>(), diff --git a/recursion-framework/src/universal_verifier_gadget/mod.rs b/recursion-framework/src/universal_verifier_gadget/mod.rs index cfd00d9bd..00c008064 100644 --- a/recursion-framework/src/universal_verifier_gadget/mod.rs +++ b/recursion-framework/src/universal_verifier_gadget/mod.rs @@ -5,7 +5,7 @@ use plonky2::{ plonk::{ circuit_builder::CircuitBuilder, circuit_data::{CircuitConfig, CircuitData, CommonCircuitData}, - config::{AlgebraicHasher, GenericConfig, Hasher}, + config::{AlgebraicHasher, GenericConfig}, }, }; @@ -74,7 +74,6 @@ pub(crate) fn build_data_for_universal_verifier< ) -> CommonCircuitData where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let num_public_inputs = num_public_inputs + CircuitSetTarget::num_targets::(config.clone()); diff --git a/recursion-framework/src/universal_verifier_gadget/verifier_gadget.rs 
b/recursion-framework/src/universal_verifier_gadget/verifier_gadget.rs index 65f8cc11b..af2a72710 100644 --- a/recursion-framework/src/universal_verifier_gadget/verifier_gadget.rs +++ b/recursion-framework/src/universal_verifier_gadget/verifier_gadget.rs @@ -7,7 +7,7 @@ use plonky2::{ circuit_data::{ CircuitConfig, CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData, }, - config::{AlgebraicHasher, GenericConfig, Hasher}, + config::{AlgebraicHasher, GenericConfig}, proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget}, }, }; @@ -56,7 +56,6 @@ impl UniversalVerifierTarget { ) -> Result<()> where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { pw.set_proof_with_pis_target(&self.verified_proof, proof); pw.set_verifier_data_target(&self.verifier_data, verifier_data); @@ -92,7 +91,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> Self where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let rec_data = build_data_for_universal_verifier::(config, NUM_PUBLIC_INPUTS); Self { @@ -114,7 +112,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> ProofWithPublicInputsTarget where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { let proof = builder.add_virtual_proof_with_pis(&self.rec_data); builder.verify_proof::(&proof, verifier_data, &self.rec_data); @@ -143,7 +140,6 @@ impl, const D: usize, const NUM_PUBLIC_INPUTS: usize ) -> UniversalVerifierTarget where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { // allocate verifier data targets let verifier_data = VerifierCircuitTarget { @@ -221,7 +217,6 @@ mod tests { > TestCircuitForUniversalVerifier where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { fn build_circuit( config: CircuitConfig, @@ -317,17 +312,16 @@ mod tests { > CircuitWithUniversalVerifier where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { fn build_circuit(config: CircuitConfig, circuit_set_size: usize) -> Self { - let builder = 
UniversalVerifierBuilder::::new( + let builder = UniversalVerifierBuilder::::new::( config.clone(), circuit_set_size, ); let mut circuit_builder = CircuitBuilder::::new(config.clone()); let circuit_set_target = CircuitSetTarget::build_target(&mut circuit_builder); let verifier_targets = - builder.universal_verifier_circuit(&mut circuit_builder, &circuit_set_target); + builder.universal_verifier_circuit::(&mut circuit_builder, &circuit_set_target); let proof_t = verifier_targets.get_proof_target(); let input_targets = as CircuitLogicWires>::circuit_logic( diff --git a/recursion-framework/src/universal_verifier_gadget/wrap_circuit.rs b/recursion-framework/src/universal_verifier_gadget/wrap_circuit.rs index 010a6338e..b84650c47 100644 --- a/recursion-framework/src/universal_verifier_gadget/wrap_circuit.rs +++ b/recursion-framework/src/universal_verifier_gadget/wrap_circuit.rs @@ -7,7 +7,7 @@ use plonky2::{ CircuitConfig, CircuitData, CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData, }, - config::{AlgebraicHasher, GenericConfig, Hasher}, + config::{AlgebraicHasher, GenericConfig}, proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget}, }, }; @@ -45,7 +45,6 @@ pub(crate) struct WrapCircuit< impl, C: GenericConfig, const D: usize> WrapCircuit where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { /// build the wrap circuit for a proof enforcing the circuit with verifier data `inner_vd` /// and `inner_cd` @@ -205,7 +204,6 @@ pub(crate) mod test { > TestCircuit where C::Hasher: AlgebraicHasher, - [(); C::Hasher::HASH_SIZE]:, { fn build_circuit( config: CircuitConfig, diff --git a/ryhope/src/storage/mod.rs b/ryhope/src/storage/mod.rs index 043b169ad..962daac9c 100644 --- a/ryhope/src/storage/mod.rs +++ b/ryhope/src/storage/mod.rs @@ -1,3 +1,4 @@ +#![allow(clippy::await_holding_lock)] use anyhow::*; use futures::future::BoxFuture; use itertools::Itertools; diff --git a/ryhope/src/storage/view.rs b/ryhope/src/storage/view.rs index 
0a46c5b94..4852894c0 100644 --- a/ryhope/src/storage/view.rs +++ b/ryhope/src/storage/view.rs @@ -24,10 +24,9 @@ pub struct StorageView< ); impl< - 's, T: Debug + Sync + Clone + Serialize + for<'a> Deserialize<'a> + Send, S: EpochStorage + Sync, - > TransactionalStorage for StorageView<'s, T, S> + > TransactionalStorage for StorageView<'_, T, S> where T: Send, { @@ -41,10 +40,9 @@ where } impl< - 's, T: Debug + Sync + Clone + Serialize + for<'a> Deserialize<'a> + Send, S: EpochStorage + Sync, - > EpochStorage for StorageView<'s, T, S> + > EpochStorage for StorageView<'_, T, S> where T: Send, { @@ -86,8 +84,8 @@ pub struct KvStorageAt<'a, T: TreeTopology, S: RoEpochKvStorage _p: PhantomData, } -impl<'a, T: TreeTopology, S: RoEpochKvStorage + Sync> - RoEpochKvStorage for KvStorageAt<'a, T, S> +impl + Sync> RoEpochKvStorage + for KvStorageAt<'_, T, S> { fn initial_epoch(&self) -> Epoch { self.wrapped.initial_epoch() @@ -136,8 +134,8 @@ impl<'a, T: TreeTopology, S: RoEpochKvStorage + Sync> } } -impl<'a, T: TreeTopology, S: RoEpochKvStorage + Sync> - EpochKvStorage for KvStorageAt<'a, T, S> +impl + Sync> EpochKvStorage + for KvStorageAt<'_, T, S> { async fn remove(&mut self, _: T::Key) -> Result<()> { unimplemented!("storage views are read only") diff --git a/verifiable-db/src/api.rs b/verifiable-db/src/api.rs index 852fac874..2429353ed 100644 --- a/verifiable-db/src/api.rs +++ b/verifiable-db/src/api.rs @@ -4,9 +4,9 @@ use crate::{ block_tree, cells_tree, extraction::{ExtractionPI, ExtractionPIWrap}, ivc, - query::{self, api::Parameters as QueryParams, PI_LEN as QUERY_PI_LEN}, + query::{self, api::Parameters as QueryParams, pi_len as query_pi_len}, revelation::{ - self, api::Parameters as RevelationParams, NUM_QUERY_IO, PI_LEN as REVELATION_PI_LEN, + self, api::Parameters as RevelationParams, num_query_io, pi_len as revelation_pi_len, }, row_tree::{self}, }; @@ -145,7 +145,7 @@ impl< const MAX_NUM_PLACEHOLDERS: usize, > WrapCircuitParams where - [(); 
REVELATION_PI_LEN::]:, + [(); revelation_pi_len::()]:, [(); >::HASH_SIZE]:, { pub fn build(revelation_circuit_set: &RecursiveCircuits) -> Self { @@ -155,13 +155,14 @@ where C, D, { - REVELATION_PI_LEN:: + revelation_pi_len::( + ) }, >::new(default_config(), revelation_circuit_set); let query_verifier_wires = verifier_gadget.verify_proof_in_circuit_set(&mut builder); // expose public inputs of verifier proof as public inputs let verified_proof_pi = query_verifier_wires.get_public_input_targets:: + revelation_pi_len::() }>(); builder.register_public_inputs(verified_proof_pi); let circuit_data = builder.build(); @@ -203,7 +204,7 @@ pub struct QueryParameters< > where [(); MAX_NUM_COLUMNS + MAX_NUM_RESULT_OPS]:, [(); MAX_NUM_ITEMS_PER_OUTPUT - 1]:, - [(); NUM_QUERY_IO::]:, + [(); num_query_io::()]:, [(); 2 * (MAX_NUM_PREDICATE_OPS + MAX_NUM_RESULT_OPS)]:, [(); ROW_TREE_MAX_DEPTH - 1]:, [(); INDEX_TREE_MAX_DEPTH - 1]:, @@ -230,6 +231,7 @@ pub struct QueryParameters< } #[derive(Serialize, Deserialize)] +#[allow(clippy::large_enum_variant)] pub enum QueryCircuitInput< const ROW_TREE_MAX_DEPTH: usize, const INDEX_TREE_MAX_DEPTH: usize, @@ -290,13 +292,13 @@ impl< where [(); MAX_NUM_COLUMNS + MAX_NUM_RESULT_OPS]:, [(); MAX_NUM_ITEMS_PER_OUTPUT - 1]:, - [(); NUM_QUERY_IO::]:, + [(); num_query_io::()]:, [(); 2 * (MAX_NUM_PREDICATE_OPS + MAX_NUM_RESULT_OPS)]:, - [(); QUERY_PI_LEN::]:, - [(); REVELATION_PI_LEN::]:, [(); ROW_TREE_MAX_DEPTH - 1]:, [(); INDEX_TREE_MAX_DEPTH - 1]:, [(); MAX_NUM_ITEMS_PER_OUTPUT * MAX_NUM_OUTPUTS]:, + [(); query_pi_len::()]:, + [(); revelation_pi_len::()]:, { /// Build `QueryParameters` from serialized `ParamsInfo` of `PublicParamaters` pub fn build_params(preprocessing_params_info: &[u8]) -> Result { diff --git a/verifiable-db/src/cells_tree/api.rs b/verifiable-db/src/cells_tree/api.rs index d2b4de1ab..1f648ed53 100644 --- a/verifiable-db/src/cells_tree/api.rs +++ b/verifiable-db/src/cells_tree/api.rs @@ -204,7 +204,7 @@ impl PublicParameters { } 
/// Get the proof of an empty node. - pub(crate) fn empty_node_proof(&self) -> &ProofWithVK { + pub fn empty_node_proof(&self) -> &ProofWithVK { &self.empty_node_proof } } diff --git a/verifiable-db/src/cells_tree/mod.rs b/verifiable-db/src/cells_tree/mod.rs index af0e85846..42e8d064e 100644 --- a/verifiable-db/src/cells_tree/mod.rs +++ b/verifiable-db/src/cells_tree/mod.rs @@ -33,7 +33,7 @@ pub use public_inputs::PublicInputs; /// A cell represents a column || value tuple. it can be given in the cells tree or as the /// secondary index value in the row tree. #[derive(Clone, Debug, Serialize, Deserialize, Constructor)] -pub(crate) struct Cell { +pub struct Cell { /// identifier of the column for the secondary index pub(crate) identifier: F, /// secondary index value @@ -48,17 +48,14 @@ impl Cell { pw.set_target(wires.identifier, self.identifier); pw.set_bool_target(wires.is_multiplier, self.is_multiplier); } - pub(crate) fn digest(&self) -> Digest { + pub fn digest(&self) -> Digest { map_to_curve_point(&self.to_fields()) } - pub(crate) fn split_digest(&self) -> SplitDigestPoint { + pub fn split_digest(&self) -> SplitDigestPoint { let digest = self.digest(); SplitDigestPoint::from_single_digest_point(digest, self.is_multiplier) } - pub(crate) fn split_and_accumulate_digest( - &self, - child_digest: SplitDigestPoint, - ) -> SplitDigestPoint { + pub fn split_and_accumulate_digest(&self, child_digest: SplitDigestPoint) -> SplitDigestPoint { let sd = self.split_digest(); sd.accumulate(&child_digest) } diff --git a/verifiable-db/src/cells_tree/public_inputs.rs b/verifiable-db/src/cells_tree/public_inputs.rs index 576183d9e..608116ef5 100644 --- a/verifiable-db/src/cells_tree/public_inputs.rs +++ b/verifiable-db/src/cells_tree/public_inputs.rs @@ -30,7 +30,7 @@ pub struct PublicInputs<'a, T> { pub(crate) mul: &'a [T], } -impl<'a> PublicInputCommon for PublicInputs<'a, Target> { +impl PublicInputCommon for PublicInputs<'_, Target> { const RANGES: &'static 
[PublicInputRange] = &[H_RANGE, DI_RANGE, DM_RANGE]; fn register_args(&self, cb: &mut CBuilder) { diff --git a/verifiable-db/src/lib.rs b/verifiable-db/src/lib.rs index 66ff71b1b..e67983aba 100644 --- a/verifiable-db/src/lib.rs +++ b/verifiable-db/src/lib.rs @@ -1,5 +1,5 @@ // Add this to allow generic const expressions, e.g. `PAD_LEN(NODE_LEN)`. - +#![allow(incomplete_features)] #![feature(generic_const_exprs)] // Add this to allow generic const items, e.g. `const IO_LEN` #![feature(generic_const_items)] diff --git a/verifiable-db/src/query/aggregation/child_proven_single_path_node.rs b/verifiable-db/src/query/aggregation/child_proven_single_path_node.rs index 7196c4d2d..90f0c5120 100644 --- a/verifiable-db/src/query/aggregation/child_proven_single_path_node.rs +++ b/verifiable-db/src/query/aggregation/child_proven_single_path_node.rs @@ -193,7 +193,7 @@ impl CircuitLogicWires mod tests { use super::*; use crate::{ - query::PI_LEN, + query::pi_len, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; use mp2_common::{poseidon::H, utils::ToFields, C, D}; @@ -217,7 +217,7 @@ mod tests { fn build(b: &mut CBuilder) -> Self::Wires { let child_proof = b - .add_virtual_target_arr::<{ PI_LEN:: }>() + .add_virtual_target_arr::<{ pi_len::() }>() .to_vec(); let pi = PublicInputs::::from_slice(&child_proof); diff --git a/verifiable-db/src/query/aggregation/embedded_tree_proven_single_path_node.rs b/verifiable-db/src/query/aggregation/embedded_tree_proven_single_path_node.rs index cca8e09e0..2e2c26056 100644 --- a/verifiable-db/src/query/aggregation/embedded_tree_proven_single_path_node.rs +++ b/verifiable-db/src/query/aggregation/embedded_tree_proven_single_path_node.rs @@ -327,7 +327,7 @@ mod tests { use rand::{thread_rng, Rng}; use crate::{ - query::PI_LEN, + query::pi_len, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; @@ -347,7 +347,7 @@ mod tests { fn build(b: &mut CBuilder) -> Self::Wires { let 
embedded_tree_proof = b - .add_virtual_target_arr::<{ PI_LEN:: }>() + .add_virtual_target_arr::<{ pi_len::() }>() .to_vec(); let pi = PublicInputs::::from_slice(&embedded_tree_proof); diff --git a/verifiable-db/src/query/aggregation/full_node_index_leaf.rs b/verifiable-db/src/query/aggregation/full_node_index_leaf.rs index 633d93b57..ffe02d5aa 100644 --- a/verifiable-db/src/query/aggregation/full_node_index_leaf.rs +++ b/verifiable-db/src/query/aggregation/full_node_index_leaf.rs @@ -134,7 +134,7 @@ impl CircuitLogicWires mod tests { use super::*; use crate::{ - query::{aggregation::utils::tests::unify_subtree_proof, PI_LEN}, + query::{aggregation::utils::tests::unify_subtree_proof, pi_len}, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; use mp2_common::{utils::ToFields, C}; @@ -155,7 +155,7 @@ mod tests { fn build(b: &mut CBuilder) -> Self::Wires { let subtree_proof = b - .add_virtual_target_arr::<{ PI_LEN:: }>() + .add_virtual_target_arr::<{ pi_len::() }>() .to_vec(); let subtree_pi = PublicInputs::::from_slice(&subtree_proof); diff --git a/verifiable-db/src/query/aggregation/full_node_with_one_child.rs b/verifiable-db/src/query/aggregation/full_node_with_one_child.rs index 7bc2bc549..8ac0b9ef1 100644 --- a/verifiable-db/src/query/aggregation/full_node_with_one_child.rs +++ b/verifiable-db/src/query/aggregation/full_node_with_one_child.rs @@ -214,7 +214,7 @@ mod tests { tests::compute_output_item_value, utils::tests::{unify_child_proof, unify_subtree_proof}, }, - PI_LEN, + pi_len, }, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; @@ -242,7 +242,7 @@ mod tests { fn build(b: &mut CBuilder) -> Self::Wires { let proofs = array::from_fn(|_| { - b.add_virtual_target_arr::<{ PI_LEN:: }>() + b.add_virtual_target_arr::<{ pi_len::() }>() .to_vec() }); let [subtree_pi, child_pi] = diff --git a/verifiable-db/src/query/aggregation/full_node_with_two_children.rs 
b/verifiable-db/src/query/aggregation/full_node_with_two_children.rs index 7a51ef3b0..1594e2ecb 100644 --- a/verifiable-db/src/query/aggregation/full_node_with_two_children.rs +++ b/verifiable-db/src/query/aggregation/full_node_with_two_children.rs @@ -203,7 +203,7 @@ mod tests { tests::compute_output_item_value, utils::tests::{unify_child_proof, unify_subtree_proof}, }, - PI_LEN, + pi_len, }, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; @@ -233,7 +233,7 @@ mod tests { fn build(b: &mut CBuilder) -> Self::Wires { let proofs = array::from_fn(|_| { - b.add_virtual_target_arr::<{ PI_LEN:: }>() + b.add_virtual_target_arr::<{ pi_len::() }>() .to_vec() }); let [subtree_pi, left_child_pi, right_child_pi] = diff --git a/verifiable-db/src/query/aggregation/mod.rs b/verifiable-db/src/query/aggregation/mod.rs index 6a36be605..431c94276 100644 --- a/verifiable-db/src/query/aggregation/mod.rs +++ b/verifiable-db/src/query/aggregation/mod.rs @@ -12,7 +12,6 @@ use mp2_common::{ F, }; use plonky2::{ - field::types::Field, hash::{hash_types::HashOut, hashing::hash_n_to_hash_no_pad}, plonk::config::GenericHashOut, }; @@ -226,7 +225,7 @@ pub enum ChildPosition { impl ChildPosition { // convert `self` to a flag specifying whether a node is the left child of another node or not - pub(crate) fn to_flag(&self) -> bool { + pub(crate) fn to_flag(self) -> bool { match self { ChildPosition::Left => true, ChildPosition::Right => false, diff --git a/verifiable-db/src/query/aggregation/output_computation.rs b/verifiable-db/src/query/aggregation/output_computation.rs index 250746c4a..ca5fcd159 100644 --- a/verifiable-db/src/query/aggregation/output_computation.rs +++ b/verifiable-db/src/query/aggregation/output_computation.rs @@ -159,7 +159,7 @@ where pub(crate) mod tests { use super::*; use crate::{ - query::{aggregation::tests::compute_output_item_value, PI_LEN}, + query::{aggregation::tests::compute_output_item_value, pi_len}, 
test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; use mp2_common::{types::CURVE_TARGET_LEN, u256::NUM_LIMBS, utils::ToFields, C, D, F}; @@ -179,8 +179,8 @@ pub(crate) mod tests { let mut outputs = vec![]; - for i in 0..S { - let mut output = if ops[i] == op_min { + for (i, &item) in ops.iter().enumerate().take(S) { + let mut output = if item == op_min { U256::MAX } else { U256::ZERO @@ -188,7 +188,7 @@ pub(crate) mod tests { .to_fields(); if i == 0 { - output = if ops[i] == op_id { + output = if item == op_id { Point::NEUTRAL.to_fields() } else { // Pad the current output to `CURVE_TARGET_LEN` for the first item. @@ -223,14 +223,15 @@ pub(crate) mod tests { for TestOutputComputationCircuit where [(); S - 1]:, - [(); PI_LEN::]:, + [(); pi_len::()]:, { // Proof public inputs + expected outputs type Wires = ([Vec; PROOF_NUM], [TestOutputWires; S]); fn build(b: &mut CBuilder) -> Self::Wires { // Initialize the proofs and the expected outputs. - let proofs = array::from_fn(|_| b.add_virtual_target_arr::<{ PI_LEN:: }>().to_vec()); + let proofs = + array::from_fn(|_| b.add_virtual_target_arr::<{ pi_len::() }>().to_vec()); let exp_outputs = array::from_fn(|i| { let output = if i == 0 { b.add_virtual_target_arr::().to_vec() diff --git a/verifiable-db/src/query/aggregation/partial_node.rs b/verifiable-db/src/query/aggregation/partial_node.rs index 3cfd312cd..5e9119e6f 100644 --- a/verifiable-db/src/query/aggregation/partial_node.rs +++ b/verifiable-db/src/query/aggregation/partial_node.rs @@ -290,7 +290,7 @@ mod tests { tests::compute_output_item_value, utils::tests::{unify_child_proof, unify_subtree_proof}, }, - PI_LEN, + pi_len, }, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; @@ -318,7 +318,7 @@ mod tests { fn build(b: &mut CBuilder) -> Self::Wires { let proofs = array::from_fn(|_| { - b.add_virtual_target_arr::<{ PI_LEN:: }>() + b.add_virtual_target_arr::<{ pi_len::() }>() .to_vec() }); let [subtree_pi, 
child_pi] = diff --git a/verifiable-db/src/query/api.rs b/verifiable-db/src/query/api.rs index 96c77a8bf..8b65f5297 100644 --- a/verifiable-db/src/query/api.rs +++ b/verifiable-db/src/query/api.rs @@ -33,6 +33,7 @@ use super::{ SubProof, TwoProvenChildNodeInput, }, computational_hash_ids::{AggregationOperation, HashPermutation, Output}, + pi_len, universal_circuit::{ output_no_aggregation::Circuit as NoAggOutputCircuit, output_with_aggregation::Circuit as AggOutputCircuit, @@ -44,7 +45,6 @@ use super::{ UniversalQueryCircuitWires, }, }, - PI_LEN, }; use alloy::primitives::U256; use anyhow::{ensure, Result}; @@ -114,13 +114,13 @@ where } /// Initialize input for universal circuit to prove the execution of a query over a /// single row, from the following inputs: - /// - `column_cells`: set of columns (including primary and secondary indexes) of the row being proven - /// - `predicate_operations`: Set of operations employed to compute the filtering predicate of the query for the + /// - `column_cells`: set of columns (including primary and secondary indexes) of the row being proven + /// - `predicate_operations`: Set of operations employed to compute the filtering predicate of the query for the /// row being proven - /// - `results`: Data structure specifying how the results for each row are computed according to the query - /// - `placeholders`: Set of placeholders employed in the query - /// - `is_leaf`: Flag specifying whether the row being proven is stored in a leaf node of the rows tree or not - /// - `query_bounds`: bounds on primary and secondary indexes specified in the query + /// - `results`: Data structure specifying how the results for each row are computed according to the query + /// - `placeholders`: Set of placeholders employed in the query + /// - `is_leaf`: Flag specifying whether the row being proven is stored in a leaf node of the rows tree or not + /// - `query_bounds`: bounds on primary and secondary indexes specified in the query /// Note that 
the following assumption is expected on the structure of the inputs: /// The output of the last operation in `predicate_operations` is taken as the filtering predicate evaluation; /// this is an assumption exploited in the circuit for efficiency, and it is a simple assumption to be required for @@ -465,13 +465,13 @@ impl< where [(); MAX_NUM_COLUMNS + MAX_NUM_RESULT_OPS]:, [(); MAX_NUM_RESULTS - 1]:, - [(); PI_LEN::]:, + [(); pi_len::()]:, [(); >::HASH_SIZE]:, { /// Build `Parameters` for query circuits pub fn build() -> Self { let builder = - CircuitWithUniversalVerifierBuilder:: }>::new::( + CircuitWithUniversalVerifierBuilder::() }>::new::( default_config(), QUERY_CIRCUIT_SET_SIZE, ); @@ -779,20 +779,14 @@ where #[cfg(test)] mod tests { - use std::{cmp::Ordering, iter::once}; + use std::cmp::Ordering; use alloy::primitives::U256; use itertools::Itertools; - use mp2_common::{ - proof::ProofWithVK, - types::HashOutput, - utils::{Fieldable, ToFields}, - F, - }; + use mp2_common::{proof::ProofWithVK, types::HashOutput, utils::Fieldable, F}; use mp2_test::utils::{gen_random_field_hash, gen_random_u256}; use plonky2::{ field::types::{PrimeField64, Sample}, - hash::{hash_types::HashOut, hashing::hash_n_to_hash_no_pad}, plonk::config::GenericHashOut, }; use rand::{thread_rng, Rng}; @@ -804,7 +798,7 @@ mod tests { }, api::{CircuitInput, Parameters}, computational_hash_ids::{ - AggregationOperation, ColumnIDs, HashPermutation, Operation, PlaceholderIdentifier, + AggregationOperation, ColumnIDs, Operation, PlaceholderIdentifier, }, public_inputs::PublicInputs, universal_circuit::universal_circuit_inputs::{ @@ -822,12 +816,7 @@ mod tests { const MAX_NUM_PREDICATE_OPS: usize = 20; const MAX_NUM_RESULT_OPS: usize = 20; const MAX_NUM_RESULTS: usize = 10; - let column_ids = (0..NUM_COLUMNS) - .map(|_| { - let id: u32 = rng.gen(); - id as u64 - }) - .collect_vec(); + let column_ids = ColumnIDs::new( F::rand().to_canonical_u64(), F::rand().to_canonical_u64(), @@ -1308,7 +1297,7 @@ 
mod tests { subtree_proof, Some(node_info_3), Some(node_info_9), - node_info_8.clone(), + node_info_8, false, &query_bounds, ) @@ -1337,7 +1326,7 @@ mod tests { subtree_proof, None, Some(node_info_8), - node_info_0.clone(), + node_info_0, false, &query_bounds, ) @@ -1376,11 +1365,7 @@ mod tests { column_values[0][0], ); let hash_0 = node_info_0.compute_node_hash(primary_index_id); - let column_cells = column_values[0] - .iter() - .zip(column_ids.to_vec().iter()) - .map(|(&value, &id)| ColumnCell::new(id.to_canonical_u64(), value)) - .collect_vec(); + // compute hashes associated to query, which are needed as inputs let query_hashes = QueryHashNonExistenceCircuits::new::< MAX_NUM_COLUMNS, @@ -1397,16 +1382,14 @@ mod tests { ) .unwrap(); let input = Input::new_non_existence_input( - node_info_0.clone(), + node_info_0, None, None, node_info_0.value, &[ column_ids.primary.to_canonical_u64(), column_ids.secondary.to_canonical_u64(), - ] - .try_into() - .unwrap(), + ], &[AggregationOperation::SumOp], query_hashes, false, @@ -1433,9 +1416,9 @@ mod tests { let subtree_proof = SubProof::new_child_proof(proof_0, ChildPosition::Left).unwrap(); let input = Input::new_single_path( subtree_proof, - Some(node_info_0.clone()), + Some(node_info_0), None, - node_info_1.clone(), + node_info_1, false, &query_bounds, ) @@ -1466,9 +1449,9 @@ mod tests { let subtree_proof = SubProof::new_child_proof(proof_1, ChildPosition::Left).unwrap(); let input = Input::new_single_path( subtree_proof, - Some(node_info_1.clone()), - Some(node_info_3.clone()), - node_info_2.clone(), + Some(node_info_1), + Some(node_info_3), + node_info_2, false, &query_bounds, ) @@ -1477,19 +1460,11 @@ mod tests { .unwrap() .into(); - check_pis( - &root_proof.public_inputs, - node_info_2.clone(), - &column_values, - ); + check_pis(&root_proof.public_inputs, node_info_2, &column_values); // generate non-existence proof starting from intermediate node (i.e., node 1) rather than a leaf node // generate proof with 
non-existence circuit for node 1 - let column_cells = column_values[1] - .iter() - .zip(column_ids.to_vec().iter()) - .map(|(&value, &id)| ColumnCell::new(id.to_canonical_u64(), value)) - .collect_vec(); + // compute hashes associated to query, which are needed as inputs let query_hashes = QueryHashNonExistenceCircuits::new::< MAX_NUM_COLUMNS, @@ -1506,16 +1481,14 @@ mod tests { ) .unwrap(); let input = Input::new_non_existence_input( - node_info_1.clone(), + node_info_1, Some(node_info_0), // node 0 is the left child None, node_info_1.value, &[ column_ids.primary.to_canonical_u64(), column_ids.secondary.to_canonical_u64(), - ] - .try_into() - .unwrap(), + ], &[AggregationOperation::SumOp], query_hashes, false, @@ -1531,9 +1504,9 @@ mod tests { let subtree_proof = SubProof::new_child_proof(proof_1, ChildPosition::Left).unwrap(); let input = Input::new_single_path( subtree_proof, - Some(node_info_1.clone()), - Some(node_info_3.clone()), - node_info_2.clone(), + Some(node_info_1), + Some(node_info_3), + node_info_2, false, &query_bounds, ) @@ -1564,14 +1537,10 @@ mod tests { gen_row(max_query_primary, IndexValueBounds::InRange), ]; // sort column values according to primary/secondary index values - column_values.sort_by(|a, b| { - if a[0] < b[0] { - Ordering::Less - } else if a[0] > b[0] { - Ordering::Greater - } else { - a[1].cmp(&b[1]) - } + column_values.sort_by(|a, b| match a[0].cmp(&b[0]) { + Ordering::Less => Ordering::Less, + Ordering::Greater => Ordering::Greater, + Ordering::Equal => a[1].cmp(&b[1]), }); // generate proof for node A rows tree @@ -1588,11 +1557,7 @@ mod tests { column_values[2][1], ); let hash_2 = node_info_2.compute_node_hash(secondary_index_id); - let column_cells = column_values[2] - .iter() - .zip(column_ids.to_vec().iter()) - .map(|(&value, &id)| ColumnCell::new(id.to_canonical_u64(), value)) - .collect_vec(); + // compute hashes associated to query, which are needed as inputs let query_hashes = QueryHashNonExistenceCircuits::new::< 
MAX_NUM_COLUMNS, @@ -1609,16 +1574,14 @@ mod tests { ) .unwrap(); let input = Input::new_non_existence_input( - node_info_2.clone(), + node_info_2, None, None, column_values[2][0], // we need to place the primary index value associated to this row &[ column_ids.primary.to_canonical_u64(), column_ids.secondary.to_canonical_u64(), - ] - .try_into() - .unwrap(), + ], &[AggregationOperation::SumOp], query_hashes, true, @@ -1663,7 +1626,7 @@ mod tests { assert_eq!(hash_1, get_tree_hash_from_proof(&proof_1),); // generate proof for node A (leaf of index tree) - let node_info_A = NodeInfo::new( + let node_info_a = NodeInfo::new( &HashOutput::try_from(hash_1.to_bytes()).unwrap(), None, None, @@ -1671,20 +1634,14 @@ mod tests { column_values[0][0], column_values[0][0], ); - let hash_A = node_info_A.compute_node_hash(primary_index_id); + let hash_a = node_info_a.compute_node_hash(primary_index_id); let subtree_proof = SubProof::new_embedded_tree_proof(proof_1).unwrap(); - let input = Input::new_single_path( - subtree_proof, - None, - None, - node_info_A.clone(), - false, - &query_bounds, - ) - .unwrap(); - let proof_A = params.generate_proof(input).unwrap(); + let input = + Input::new_single_path(subtree_proof, None, None, node_info_a, false, &query_bounds) + .unwrap(); + let proof_a = params.generate_proof(input).unwrap(); // check hash - assert_eq!(hash_A, get_tree_hash_from_proof(&proof_A),); + assert_eq!(hash_a, get_tree_hash_from_proof(&proof_a),); // generate proof for node B rows tree // all the nodes are in the range, so we generate proofs for each of the nodes @@ -1714,14 +1671,14 @@ mod tests { // generate proof for node B of the index tree (root node) let node_info_root = NodeInfo::new( &HashOutput::try_from(hash_4.to_bytes()).unwrap(), - Some(&HashOutput::try_from(hash_A.to_bytes()).unwrap()), + Some(&HashOutput::try_from(hash_a.to_bytes()).unwrap()), None, column_values[4][0], column_values[0][0], column_values[5][0], ); let input = Input::new_partial_node( - 
proof_A, + proof_a, proof_4, None, ChildPosition::Left, diff --git a/verifiable-db/src/query/computational_hash_ids.rs b/verifiable-db/src/query/computational_hash_ids.rs index b1d78c4a2..42c135a2c 100644 --- a/verifiable-db/src/query/computational_hash_ids.rs +++ b/verifiable-db/src/query/computational_hash_ids.rs @@ -639,7 +639,7 @@ impl FromFields for AggregationOperation { impl AggregationOperation { /// Return the identity value for `self` operation - pub(crate) fn identity_value(&self) -> Vec { + pub fn identity_value(&self) -> Vec { match self { AggregationOperation::SumOp => U256::ZERO.to_fields(), AggregationOperation::MinOp => U256::MAX.to_fields(), diff --git a/verifiable-db/src/query/merkle_path.rs b/verifiable-db/src/query/merkle_path.rs index 050bbadc8..c442aa51e 100644 --- a/verifiable-db/src/query/merkle_path.rs +++ b/verifiable-db/src/query/merkle_path.rs @@ -157,7 +157,6 @@ where pub fn new( path: &[(NodeInfo, ChildPosition)], siblings: &[Option], - index_id: u64, ) -> Result { let num_real_nodes = path.len(); ensure!( @@ -186,9 +185,7 @@ where siblings .get(i) .and_then(|sibling| { - sibling - .clone() - .and_then(|node_hash| Some(HashOut::from_bytes((&node_hash).into()))) + sibling.and_then(|node_hash| Some(HashOut::from_bytes(node_hash.as_ref()))) }) .unwrap_or(*empty_poseidon_hash()) }); @@ -301,8 +298,8 @@ mod tests { utils::{gen_random_field_hash, gen_random_u256}, }; use plonky2::{ - field::types::{PrimeField64, Sample}, - hash::hash_types::{HashOut, HashOutTarget}, + field::types::Sample, + hash::hash_types::HashOutTarget, iop::{ target::Target, witness::{PartialWitness, WitnessWrite}, @@ -375,45 +372,43 @@ mod tests { ) }; - let node_E = random_node(None, None); // it's a leaf node, so no children - let node_F = random_node(None, None); - let node_G = random_node(None, None); - let node_E_hash = - HashOutput::try_from(node_E.compute_node_hash(index_id).to_bytes()).unwrap(); - let node_D = random_node( - Some(&node_E_hash), - 
Some(&HashOutput::try_from(node_F.compute_node_hash(index_id).to_bytes()).unwrap()), + let node_e = random_node(None, None); // it's a leaf node, so no children + let node_f = random_node(None, None); + let node_g = random_node(None, None); + let node_e_hash = + HashOutput::try_from(node_e.compute_node_hash(index_id).to_bytes()).unwrap(); + let node_d = random_node( + Some(&node_e_hash), + Some(&HashOutput::try_from(node_f.compute_node_hash(index_id).to_bytes()).unwrap()), ); - let node_B = random_node( - Some(&HashOutput::try_from(node_D.compute_node_hash(index_id).to_bytes()).unwrap()), + let node_b = random_node( + Some(&HashOutput::try_from(node_d.compute_node_hash(index_id).to_bytes()).unwrap()), None, ); - let node_C = random_node( + let node_c = random_node( None, - Some(&HashOutput::try_from(node_G.compute_node_hash(index_id).to_bytes()).unwrap()), + Some(&HashOutput::try_from(node_g.compute_node_hash(index_id).to_bytes()).unwrap()), ); - let node_B_hash = - HashOutput::try_from(node_B.compute_node_hash(index_id).to_bytes()).unwrap(); - let node_C_hash = - HashOutput::try_from(node_C.compute_node_hash(index_id).to_bytes()).unwrap(); - let node_A = random_node(Some(&node_B_hash), Some(&node_C_hash)); - let root = node_A.compute_node_hash(index_id); + let node_b_hash = + HashOutput::try_from(node_b.compute_node_hash(index_id).to_bytes()).unwrap(); + let node_c_hash = + HashOutput::try_from(node_c.compute_node_hash(index_id).to_bytes()).unwrap(); + let node_a = random_node(Some(&node_b_hash), Some(&node_c_hash)); + let root = node_a.compute_node_hash(index_id); // verify Merkle-path related to leaf F const MAX_DEPTH: usize = 10; let path = vec![ - (node_D.clone(), ChildPosition::Right), // we start from the ancestor of the start node of the path - (node_B.clone(), ChildPosition::Left), - (node_A.clone(), ChildPosition::Left), + (node_d, ChildPosition::Right), // we start from the ancestor of the start node of the path + (node_b, ChildPosition::Left), + 
(node_a, ChildPosition::Left), ]; - let siblings = vec![Some(node_E_hash), None, Some(node_C_hash.clone())]; - let merkle_path_inputs = - MerklePathGadget::::new(&path, &siblings, index_id.to_canonical_u64()) - .unwrap(); + let siblings = vec![Some(node_e_hash), None, Some(node_c_hash)]; + let merkle_path_inputs = MerklePathGadget::::new(&path, &siblings).unwrap(); let circuit = TestMerklePathGadget:: { merkle_path_inputs, - start_node: node_F.clone(), + start_node: node_f, index_id, }; @@ -423,16 +418,14 @@ mod tests { // verify Merkle-path related to leaf G let path = vec![ - (node_C.clone(), ChildPosition::Right), - (node_A.clone(), ChildPosition::Right), + (node_c, ChildPosition::Right), + (node_a, ChildPosition::Right), ]; - let siblings = vec![None, Some(node_B_hash)]; - let merkle_path_inputs = - MerklePathGadget::::new(&path, &siblings, index_id.to_canonical_u64()) - .unwrap(); + let siblings = vec![None, Some(node_b_hash)]; + let merkle_path_inputs = MerklePathGadget::::new(&path, &siblings).unwrap(); let circuit = TestMerklePathGadget:: { merkle_path_inputs, - start_node: node_G.clone(), + start_node: node_g, index_id, }; @@ -441,17 +434,12 @@ mod tests { assert_eq!(proof.public_inputs, root.to_vec()); // Verify Merkle-path related to node D - let path = vec![ - (node_B.clone(), ChildPosition::Left), - (node_A.clone(), ChildPosition::Left), - ]; - let siblings = vec![None, Some(node_C_hash)]; - let merkle_path_inputs = - MerklePathGadget::::new(&path, &siblings, index_id.to_canonical_u64()) - .unwrap(); + let path = vec![(node_b, ChildPosition::Left), (node_a, ChildPosition::Left)]; + let siblings = vec![None, Some(node_c_hash)]; + let merkle_path_inputs = MerklePathGadget::::new(&path, &siblings).unwrap(); let circuit = TestMerklePathGadget:: { merkle_path_inputs, - start_node: node_D.clone(), + start_node: node_d, index_id, }; diff --git a/verifiable-db/src/query/mod.rs b/verifiable-db/src/query/mod.rs index 2366b4ae8..b07c5e45b 100644 --- 
a/verifiable-db/src/query/mod.rs +++ b/verifiable-db/src/query/mod.rs @@ -8,6 +8,6 @@ pub mod merkle_path; pub mod public_inputs; pub mod universal_circuit; -// Without this skipping config, the generic parameter was deleted when `cargo fmt`. -#[rustfmt::skip] -pub const PI_LEN: usize = PublicInputs::::total_len(); +pub const fn pi_len() -> usize { + PublicInputs::::total_len() +} diff --git a/verifiable-db/src/query/universal_circuit/output_with_aggregation.rs b/verifiable-db/src/query/universal_circuit/output_with_aggregation.rs index b23174c19..c1705f142 100644 --- a/verifiable-db/src/query/universal_circuit/output_with_aggregation.rs +++ b/verifiable-db/src/query/universal_circuit/output_with_aggregation.rs @@ -214,7 +214,6 @@ mod tests { utils::{gen_random_field_hash, gen_random_u256}, }; use plonky2::{ - field::types::Field, iop::{ target::{BoolTarget, Target}, witness::{PartialWitness, WitnessWrite}, @@ -377,6 +376,7 @@ mod tests { const ACTUAL_NUM_RESULTS: usize, > TestOutputComponentInputs { + #[allow(clippy::too_many_arguments)] fn new( column_values: [U256; MAX_NUM_COLUMNS], column_hash: [ComputationalHash; MAX_NUM_COLUMNS], diff --git a/verifiable-db/src/query/universal_circuit/universal_circuit_inputs.rs b/verifiable-db/src/query/universal_circuit/universal_circuit_inputs.rs index ee314ae59..76d56a471 100644 --- a/verifiable-db/src/query/universal_circuit/universal_circuit_inputs.rs +++ b/verifiable-db/src/query/universal_circuit/universal_circuit_inputs.rs @@ -82,6 +82,11 @@ impl Placeholders { self.0.len() } + /// Returns whether `self` is empty or not + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + /// Return set of placeholders ids, in the order expected in the public inputs of the final /// proof pub fn ids(&self) -> Vec { @@ -345,7 +350,7 @@ impl ResultStructure { /// `column_values` as the operands for the operations having `InputOperand::Column` /// operands, and the provided `placeholders` for the operations having 
`InputOperand::Placeholder` /// operands. - pub(crate) fn compute_output_values( + pub fn compute_output_values( &self, column_values: &[U256], placeholders: &Placeholders, diff --git a/verifiable-db/src/query/universal_circuit/universal_query_circuit.rs b/verifiable-db/src/query/universal_circuit/universal_query_circuit.rs index 7cd8ed9c0..1e3ec0919 100644 --- a/verifiable-db/src/query/universal_circuit/universal_query_circuit.rs +++ b/verifiable-db/src/query/universal_circuit/universal_query_circuit.rs @@ -37,11 +37,11 @@ use crate::query::{ computational_hash_ids::{ ComputationalHashCache, HashPermutation, Operation, Output, PlaceholderIdentifier, }, + pi_len, public_inputs::PublicInputs, universal_circuit::{ basic_operation::BasicOperationInputs, universal_circuit_inputs::OutputItem, }, - PI_LEN, }; use super::{ @@ -967,14 +967,6 @@ fn dummy_placeholder(placeholders: &Placeholders) -> Placeholder { } } -fn dummy_placeholder_from_query_bounds(query_bounds: &QueryBounds) -> Placeholder { - let placeholders = Placeholders::new_empty( - query_bounds.min_query_primary(), - query_bounds.max_query_primary(), - ); - dummy_placeholder(&placeholders) -} - pub(crate) fn dummy_placeholder_id() -> PlaceholderId { PlaceholderIdentifier::default() } @@ -1046,7 +1038,7 @@ where T, >; - const NUM_PUBLIC_INPUTS: usize = PI_LEN::; + const NUM_PUBLIC_INPUTS: usize = pi_len::(); fn circuit_logic( builder: &mut CircuitBuilder, @@ -1169,7 +1161,7 @@ mod tests { utils::gen_random_u256, }; use plonky2::{ - field::types::{Field, PrimeField64, Sample}, + field::types::{PrimeField64, Sample}, hash::hashing::hash_n_to_hash_no_pad, iop::witness::PartialWitness, plonk::{circuit_builder::CircuitBuilder, config::GenericHashOut}, diff --git a/verifiable-db/src/results_tree/binding/binding_results.rs b/verifiable-db/src/results_tree/binding/binding_results.rs index 00454fc16..9431af03c 100644 --- a/verifiable-db/src/results_tree/binding/binding_results.rs +++ 
b/verifiable-db/src/results_tree/binding/binding_results.rs @@ -26,6 +26,8 @@ pub struct BindingResultsWires; pub struct BindingResultsCircuit; impl BindingResultsCircuit { + // CHORE: Remove this when relevant PR is merged + #[allow(dead_code)] pub fn build( b: &mut CBuilder, query_proof: &QueryProofPI, @@ -97,9 +99,10 @@ impl BindingResultsCircuit { mod tests { use super::*; use crate::{ + query::pi_len as query_pi_len, results_tree::construction::{ public_inputs::ResultsConstructionPublicInputs, - tests::random_results_construction_public_inputs, + tests::{pi_len, random_results_construction_public_inputs}, }, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; @@ -114,8 +117,8 @@ mod tests { const S: usize = 20; - const QUERY_PI_LEN: usize = crate::query::PI_LEN::; - const RESULTS_CONSTRUCTION_PI_LEN: usize = crate::results_tree::construction::PI_LEN::; + const QUERY_PI_LEN: usize = query_pi_len::(); + const RESULTS_CONSTRUCTION_PI_LEN: usize = pi_len::(); #[derive(Clone, Debug)] struct TestBindingResultsCircuit<'a> { diff --git a/verifiable-db/src/results_tree/binding/public_inputs.rs b/verifiable-db/src/results_tree/binding/public_inputs.rs index eee9756c5..62736ab83 100644 --- a/verifiable-db/src/results_tree/binding/public_inputs.rs +++ b/verifiable-db/src/results_tree/binding/public_inputs.rs @@ -45,6 +45,8 @@ pub struct PublicInputs<'a, T> { const NUM_PUBLIC_INPUTS: usize = ResultsBindingPublicInputs::Overflow as usize + 1; +// CHORE: Remove this when relevant PR is merged +#[allow(dead_code)] impl<'a, T: Clone> PublicInputs<'a, T> { const PI_RANGES: [PublicInputRange; NUM_PUBLIC_INPUTS] = [ Self::to_range(ResultsBindingPublicInputs::ResultsTreeHash), @@ -169,6 +171,8 @@ impl PublicInputCommon for PublicInputs<'_, Target> { } } +// CHORE: Remove this when relevant PR is merged +#[allow(dead_code)] impl PublicInputs<'_, Target> { pub fn results_tree_hash_target(&self) -> HashOutTarget { 
HashOutTarget::try_from(self.to_results_tree_hash_raw()).unwrap() @@ -195,6 +199,8 @@ impl PublicInputs<'_, Target> { } } +// CHORE: Remove this when relevant PR is merged +#[allow(dead_code)] impl PublicInputs<'_, F> { pub fn results_tree_hash(&self) -> HashOut { HashOut::try_from(self.to_results_tree_hash_raw()).unwrap() diff --git a/verifiable-db/src/results_tree/construction/leaf_node.rs b/verifiable-db/src/results_tree/construction/leaf_node.rs index 947d9fa34..5888c9c96 100644 --- a/verifiable-db/src/results_tree/construction/leaf_node.rs +++ b/verifiable-db/src/results_tree/construction/leaf_node.rs @@ -93,8 +93,8 @@ impl CircuitLogicWires for LeafNodeWi #[cfg(test)] mod tests { use super::*; - use crate::results_tree::construction::{ - tests::random_results_construction_public_inputs, PI_LEN, + use crate::results_tree::construction::tests::{ + pi_len, random_results_construction_public_inputs, }; use mp2_common::{utils::ToFields, C}; use mp2_test::circuit::{run_circuit, UserCircuit}; @@ -112,7 +112,7 @@ mod tests { type Wires = (LeafNodeWires, Vec); fn build(b: &mut CBuilder) -> Self::Wires { - let subtree_proof = b.add_virtual_target_arr::<{ PI_LEN:: }>().to_vec(); + let subtree_proof = b.add_virtual_target_arr::<{ pi_len::() }>().to_vec(); let subtree_pi = PublicInputs::::from_slice(&subtree_proof); let wires = LeafNodeCircuit::build(b, &subtree_pi); diff --git a/verifiable-db/src/results_tree/construction/mod.rs b/verifiable-db/src/results_tree/construction/mod.rs index e9e436513..1415ede34 100644 --- a/verifiable-db/src/results_tree/construction/mod.rs +++ b/verifiable-db/src/results_tree/construction/mod.rs @@ -1,5 +1,3 @@ -use mp2_common::F; - pub(crate) mod leaf_node; pub(crate) mod node_with_one_child; pub(crate) mod node_with_two_children; @@ -7,16 +5,12 @@ pub(crate) mod public_inputs; pub(crate) mod results_tree_with_duplicates; pub(crate) mod results_tree_without_duplicates; -// Without this skipping config, the generic parameter was deleted 
when `cargo fmt`. -#[rustfmt::skip] -pub(crate) const PI_LEN: usize = public_inputs::PublicInputs::::total_len(); - #[cfg(test)] pub(crate) mod tests { use super::*; use alloy::primitives::U256; use itertools::Itertools; - use mp2_common::utils::ToFields; + use mp2_common::{utils::ToFields, F}; use mp2_test::utils::random_vector; use plonky2::field::types::{Field, Sample}; use plonky2_ecgfp5::curve::curve::Point; @@ -24,6 +18,12 @@ pub(crate) mod tests { use rand::{prelude::SliceRandom, thread_rng, Rng}; use std::array; + /// Constant function that returns the length of [`PublicInputs`] based on + /// some constant value [`S`]. + pub(crate) const fn pi_len() -> usize { + public_inputs::PublicInputs::::total_len() + } + /// Generate S number of proof public input slices. The each returned proof public inputs /// could be constructed by `PublicInputs::from_slice` function. pub(crate) fn random_results_construction_public_inputs( @@ -42,7 +42,7 @@ pub(crate) mod tests { .map(PublicInputs::::to_range); array::from_fn(|_| { - let mut pi = random_vector::(PI_LEN::).to_fields(); + let mut pi = random_vector::(pi_len::()).to_fields(); // Set no duplicates flag. 
pi[no_dup_range.clone()].copy_from_slice(&[no_dup_flag]); diff --git a/verifiable-db/src/results_tree/construction/node_with_one_child.rs b/verifiable-db/src/results_tree/construction/node_with_one_child.rs index 69005d64c..c472d1f01 100644 --- a/verifiable-db/src/results_tree/construction/node_with_one_child.rs +++ b/verifiable-db/src/results_tree/construction/node_with_one_child.rs @@ -294,11 +294,8 @@ where #[cfg(test)] mod tests { use super::*; - use crate::results_tree::construction::{ - tests::{ - random_results_construction_public_inputs, unify_child_proof, unify_subtree_proof, - }, - PI_LEN, + use crate::results_tree::construction::tests::{ + pi_len, random_results_construction_public_inputs, unify_child_proof, unify_subtree_proof, }; use mp2_common::{group_hashing::add_weierstrass_point, poseidon::H, utils::ToFields, C}; use mp2_test::circuit::{run_circuit, UserCircuit}; @@ -319,7 +316,8 @@ mod tests { type Wires = (NodeWithOneChildWires, Vec, Vec); fn build(b: &mut CBuilder) -> Self::Wires { - let proofs = array::from_fn(|_| b.add_virtual_target_arr::<{ PI_LEN:: }>().to_vec()); + let proofs = + array::from_fn(|_| b.add_virtual_target_arr::<{ pi_len::() }>().to_vec()); let [subtree_pi, child_pi] = array::from_fn(|i| PublicInputs::::from_slice(&proofs[i])); diff --git a/verifiable-db/src/results_tree/construction/node_with_two_children.rs b/verifiable-db/src/results_tree/construction/node_with_two_children.rs index 1c49fff40..d411b2453 100644 --- a/verifiable-db/src/results_tree/construction/node_with_two_children.rs +++ b/verifiable-db/src/results_tree/construction/node_with_two_children.rs @@ -248,11 +248,8 @@ where #[cfg(test)] mod tests { use super::*; - use crate::results_tree::construction::{ - tests::{ - random_results_construction_public_inputs, unify_child_proof, unify_subtree_proof, - }, - PI_LEN, + use crate::results_tree::construction::tests::{ + pi_len, random_results_construction_public_inputs, unify_child_proof, unify_subtree_proof, }; use 
mp2_common::{group_hashing::add_weierstrass_point, utils::ToFields, C}; use mp2_test::circuit::{run_circuit, UserCircuit}; @@ -279,7 +276,8 @@ mod tests { ); fn build(b: &mut CBuilder) -> Self::Wires { - let proofs = array::from_fn(|_| b.add_virtual_target_arr::<{ PI_LEN:: }>().to_vec()); + let proofs = + array::from_fn(|_| b.add_virtual_target_arr::<{ pi_len::() }>().to_vec()); let [subtree_pi, left_child_pi, right_child_pi] = array::from_fn(|i| PublicInputs::::from_slice(&proofs[i])); diff --git a/verifiable-db/src/results_tree/construction/public_inputs.rs b/verifiable-db/src/results_tree/construction/public_inputs.rs index 7ae969d26..fd8e3c2e6 100644 --- a/verifiable-db/src/results_tree/construction/public_inputs.rs +++ b/verifiable-db/src/results_tree/construction/public_inputs.rs @@ -194,7 +194,7 @@ impl<'a, T: Clone, const S: usize> PublicInputs<'a, T, S> { acc: &input[Self::PI_RANGES[10].clone()], } } - + #[allow(clippy::too_many_arguments)] pub fn new( h: &'a [T], min_val: &'a [T], @@ -223,6 +223,8 @@ impl<'a, T: Clone, const S: usize> PublicInputs<'a, T, S> { } } + // CHORE: Remove this when relevant PR is merged + #[allow(dead_code)] pub fn to_vec(&self) -> Vec { self.h .iter() @@ -315,6 +317,8 @@ impl PublicInputs<'_, Target, S> { } } +// CHORE: Remove this when relevant PR is merged +#[allow(dead_code)] impl PublicInputs<'_, F, S> { pub fn tree_hash(&self) -> HashOut { HashOut::try_from(self.to_tree_hash_raw()).unwrap() diff --git a/verifiable-db/src/revelation/api.rs b/verifiable-db/src/revelation/api.rs index 21837dd7f..26dea52e3 100644 --- a/verifiable-db/src/revelation/api.rs +++ b/verifiable-db/src/revelation/api.rs @@ -11,9 +11,8 @@ use mp2_common::{ u256::is_less_than_or_equal_to_u256_arr, C, D, F, }; -use plonky2::{ - field::types::PrimeField64, - plonk::{circuit_data::VerifierOnlyCircuitData, config::Hasher, proof::ProofWithPublicInputs}, +use plonky2::plonk::{ + circuit_data::VerifierOnlyCircuitData, config::Hasher, 
proof::ProofWithPublicInputs, }; use recursion_framework::{ circuit_builder::{CircuitWithUniversalVerifier, CircuitWithUniversalVerifierBuilder}, @@ -29,10 +28,10 @@ use crate::{ aggregation::QueryBounds, api::{CircuitInput as QueryCircuitInput, Parameters as QueryParams}, computational_hash_ids::ColumnIDs, + pi_len as query_pi_len, universal_circuit::universal_circuit_inputs::{ BasicOperation, Placeholders, ResultStructure, }, - PI_LEN as QUERY_PI_LEN, }, revelation::{ placeholders_check::CheckPlaceholderGadget, @@ -44,6 +43,7 @@ use crate::{ }; use super::{ + num_query_io, pi_len, revelation_unproven_offset::{ RecursiveCircuitInputs as RecursiveCircuitInputsUnporvenOffset, RevelationCircuit as RevelationCircuitUnprovenOffset, RowPath, @@ -52,7 +52,6 @@ use super::{ CircuitBuilderParams, RecursiveCircuitInputs, RecursiveCircuitWires, RevelationWithoutResultsTreeCircuit, }, - NUM_QUERY_IO, PI_LEN, }; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] /// Data structure employed to provide input data related to a matching row @@ -147,11 +146,11 @@ pub struct Parameters< const MAX_NUM_PLACEHOLDERS: usize, > where [(); MAX_NUM_ITEMS_PER_OUTPUT - 1]:, - [(); NUM_QUERY_IO::]:, [(); ROW_TREE_MAX_DEPTH - 1]:, [(); INDEX_TREE_MAX_DEPTH - 1]:, [(); MAX_NUM_ITEMS_PER_OUTPUT * MAX_NUM_OUTPUTS]:, [(); 2 * (MAX_NUM_PREDICATE_OPS + MAX_NUM_RESULT_OPS)]:, + [(); num_query_io::()]:, { revelation_no_results_tree: CircuitWithUniversalVerifier< F, @@ -195,6 +194,7 @@ pub struct Parameters< /// upper bound on the number of items being found in `SELECT` statement of the query /// - `MAX_NUM_PLACEHOLDERS`: upper bound on the number of placeholders we allow in a query /// - `NUM_PLACEHOLDERS_HASHED`: number of placeholders being hashed in the placeholder hash +#[allow(clippy::large_enum_variant)] pub enum CircuitInput< const ROW_TREE_MAX_DEPTH: usize, const INDEX_TREE_MAX_DEPTH: usize, @@ -267,7 +267,7 @@ where [(); INDEX_TREE_MAX_DEPTH - 1]:, [(); MAX_NUM_ITEMS_PER_OUTPUT * 
MAX_NUM_OUTPUTS]:, [(); MAX_NUM_ITEMS_PER_OUTPUT - 1]:, - [(); QUERY_PI_LEN::]:, + [(); query_pi_len::()]:, [(); 2 * (MAX_NUM_PREDICATE_OPS + MAX_NUM_RESULT_OPS)]:, [(); MAX_NUM_COLUMNS + MAX_NUM_RESULT_OPS]:, { @@ -331,6 +331,7 @@ where /// - `results_structure`: Data about the operations and items returned in the `SELECT` clause of the query /// - `limit, offset`: limit and offset values specified in the query /// - `distinct`: Flag specifying whether the DISTINCT keyword was specified in the query + #[allow(clippy::too_many_arguments)] pub fn new_revelation_tabular( preprocessing_proof: Vec, matching_rows: Vec, @@ -391,13 +392,9 @@ where )?; let placeholder_inputs = CheckPlaceholderGadget::new(query_bounds, placeholders, placeholder_hash_ids)?; - let index_ids = [ - column_ids.primary.to_canonical_u64(), - column_ids.secondary.to_canonical_u64(), - ]; + let revelation_circuit = RevelationCircuitUnprovenOffset::new( row_paths, - index_ids, &results_structure.output_ids, result_values, limit, @@ -438,15 +435,15 @@ impl< > where [(); MAX_NUM_ITEMS_PER_OUTPUT - 1]:, - [(); NUM_QUERY_IO::]:, + [(); num_query_io::()]:, [(); >::HASH_SIZE]:, - [(); PI_LEN::]:, [(); ROW_TREE_MAX_DEPTH - 1]:, [(); INDEX_TREE_MAX_DEPTH - 1]:, [(); MAX_NUM_ITEMS_PER_OUTPUT * MAX_NUM_OUTPUTS]:, [(); MAX_NUM_COLUMNS + MAX_NUM_RESULT_OPS]:, - [(); QUERY_PI_LEN::]:, + [(); query_pi_len::()]:, [(); 2 * (MAX_NUM_PREDICATE_OPS + MAX_NUM_RESULT_OPS)]:, + [(); pi_len::()]:, { pub fn build( query_circuit_set: &RecursiveCircuits, @@ -456,7 +453,7 @@ where let builder = CircuitWithUniversalVerifierBuilder::< F, D, - { PI_LEN:: }, + { pi_len::() }, >::new::(default_config(), REVELATION_CIRCUIT_SET_SIZE); let build_parameters = CircuitBuilderParams { query_circuit_set: query_circuit_set.clone(), @@ -590,14 +587,14 @@ mod tests { use crate::{ ivc::PublicInputs as PreprocessingPI, query::{ - api::CircuitInput as QueryInput, computational_hash_ids::{ColumnIDs, Identifiers}, public_inputs::PublicInputs as 
QueryPI, }, revelation::{ api::{CircuitInput, Parameters}, + num_query_io, tests::compute_results_from_query_proof, - PublicInputs, NUM_PREPROCESSING_IO, NUM_QUERY_IO, + PublicInputs, NUM_PREPROCESSING_IO, }, }; @@ -612,7 +609,7 @@ mod tests { F, C, D, - { NUM_QUERY_IO:: }, + { num_query_io::() }, >::default(); let preprocessing_circuits = TestingRecursiveCircuits::::default(); diff --git a/verifiable-db/src/revelation/mod.rs b/verifiable-db/src/revelation/mod.rs index 5253b4ca3..eaa4045ff 100644 --- a/verifiable-db/src/revelation/mod.rs +++ b/verifiable-db/src/revelation/mod.rs @@ -1,6 +1,6 @@ //! Module including the revelation circuits for query -use crate::{ivc::NUM_IO, query::PI_LEN as QUERY_PI_LEN}; +use crate::{ivc::NUM_IO, query::pi_len as query_pi_len}; use mp2_common::F; pub mod api; @@ -12,18 +12,17 @@ mod revelation_without_results_tree; pub use public_inputs::PublicInputs; pub use revelation_unproven_offset::RowPath; -// L: maximum number of results -// S: maximum number of items in each result -// PH: maximum number of unique placeholder IDs and values bound for query -// Without this skipping config, the generic parameter was deleted when `cargo fmt`. 
-#[rustfmt::skip] -pub(crate) const PI_LEN: usize = - PublicInputs::::total_len(); - +/// L: maximum number of results +/// S: maximum number of items in each result +/// PH: maximum number of unique placeholder IDs and values bound for query +pub const fn pi_len() -> usize { + PublicInputs::::total_len() +} pub const NUM_PREPROCESSING_IO: usize = NUM_IO; -#[rustfmt::skip] -pub const NUM_QUERY_IO: usize = QUERY_PI_LEN::; +pub const fn num_query_io() -> usize { + query_pi_len::() +} #[cfg(test)] pub(crate) mod tests { use super::*; @@ -35,10 +34,7 @@ pub(crate) mod tests { use itertools::Itertools; use mp2_common::{array::ToField, poseidon::H, utils::ToFields, F}; use mp2_test::utils::gen_random_u256; - use placeholders_check::{ - placeholder_ids_hash, CheckPlaceholderGadget, CheckedPlaceholder, - NUM_SECONDARY_INDEX_PLACEHOLDERS, - }; + use placeholders_check::{placeholder_ids_hash, CheckPlaceholderGadget, CheckedPlaceholder}; use plonky2::{field::types::PrimeField64, hash::hash_types::HashOut, plonk::config::Hasher}; use rand::{thread_rng, Rng}; use std::{array, iter::once}; @@ -148,7 +144,10 @@ pub(crate) mod tests { // Re-compute the placeholder hash from placeholder_pairs and minmum, // maximum query bounds. Then check it should be same with the specified // final placeholder hash. - let (min_i1, max_i1) = check_placeholder_inputs.primary_query_bounds(); + let (min_i1, max_i1) = ( + check_placeholder_inputs.placeholder_values[0], + check_placeholder_inputs.placeholder_values[1], + ); let placeholder_hash = H::hash_no_pad(&placeholder_hash_payload); // query_placeholder_hash = H(placeholder_hash || min_i2 || max_i2) let inputs = placeholder_hash @@ -196,7 +195,7 @@ pub(crate) mod tests { { // Convert the entry count to an Uint256. 
let entry_count = U256::from(query_pi.num_matching_rows().to_canonical_u64()); - + let [op_avg, op_count] = [AggregationOperation::AvgOp, AggregationOperation::CountOp].map(|op| op.to_field()); diff --git a/verifiable-db/src/revelation/placeholders_check.rs b/verifiable-db/src/revelation/placeholders_check.rs index 91bb02ca0..84c85fcfa 100644 --- a/verifiable-db/src/revelation/placeholders_check.rs +++ b/verifiable-db/src/revelation/placeholders_check.rs @@ -206,19 +206,15 @@ impl CheckPlaceholderGadget { }; let to_be_checked_placeholders = placeholder_hash_ids .into_iter() - .map(|placeholder_id| compute_checked_placeholder_for_id(placeholder_id)) + .map(&compute_checked_placeholder_for_id) .collect::>>()?; // compute placeholders data to be hashed for secondary query bounds - let min_query_secondary = QueryBound::new_secondary_index_bound( - &placeholders, - &query_bounds.min_query_secondary(), - ) - .unwrap(); - let max_query_secondary = QueryBound::new_secondary_index_bound( - &placeholders, - &query_bounds.max_query_secondary(), - ) - .unwrap(); + let min_query_secondary = + QueryBound::new_secondary_index_bound(placeholders, query_bounds.min_query_secondary()) + .unwrap(); + let max_query_secondary = + QueryBound::new_secondary_index_bound(placeholders, query_bounds.max_query_secondary()) + .unwrap(); let secondary_query_bound_placeholders = [min_query_secondary, max_query_secondary] .into_iter() .flat_map(|query_bound| { @@ -306,10 +302,6 @@ impl CheckPlaceholderGadget { .zip(&self.secondary_query_bound_placeholders) .for_each(|(t, v)| v.assign(pw, t)); } - // Return the query bounds on the primary index, taken from the placeholder values - pub(crate) fn primary_query_bounds(&self) -> (U256, U256) { - (self.placeholder_values[0], self.placeholder_values[1]) - } } /// This gadget checks that the placeholders identifiers and values employed to @@ -397,10 +389,10 @@ pub(crate) fn check_placeholders( // Check the placeholder hash of proof is computed only from 
expected placeholder values. let mut placeholder_hash_payload = vec![]; - for i in 0..PP { + for item in to_be_checked_placeholder.iter().take(PP) { // Accumulate the placeholder identifiers and values for computing the // placeholder hash. - let CheckedPlaceholderTarget { id, value, pos } = &to_be_checked_placeholder[i]; + let CheckedPlaceholderTarget { id, value, pos } = item; let payload = once(*id).chain(value.to_targets()); placeholder_hash_payload.extend(payload); @@ -408,8 +400,8 @@ pub(crate) fn check_placeholders( } // check placeholders related to secondary index bounds - for i in 0..2 { - let CheckedPlaceholderTarget { id, value, pos } = &secondary_query_bound_placeholder[i]; + for item in secondary_query_bound_placeholder.iter().take(2) { + let CheckedPlaceholderTarget { id, value, pos } = item; check_placeholder_pair(id, value, *pos); } @@ -467,7 +459,7 @@ pub(crate) fn placeholder_ids_hash mod tests { use super::*; use crate::revelation::tests::TestPlaceholders; - use mp2_common::{u256::WitnessWriteU256, C, D, F}; + use mp2_common::{C, D, F}; use mp2_test::circuit::{run_circuit, UserCircuit}; use plonky2::{ field::types::Field, diff --git a/verifiable-db/src/revelation/public_inputs.rs b/verifiable-db/src/revelation/public_inputs.rs index c387d6de6..5fca5c4ee 100644 --- a/verifiable-db/src/revelation/public_inputs.rs +++ b/verifiable-db/src/revelation/public_inputs.rs @@ -183,7 +183,7 @@ impl<'a, T: Clone, const L: usize, const S: usize, const PH: usize> PublicInputs query_offset: &input[Self::PI_RANGES[9].clone()][0], } } - + #[allow(clippy::too_many_arguments)] pub fn new( original_block_hash: &'a [T], flat_computational_hash: &'a [T], @@ -398,7 +398,7 @@ mod tests { type PI<'a> = PublicInputs<'a, F, L, S, PH>; type PITargets<'a> = PublicInputs<'a, Target, L, S, PH>; - const PI_LEN: usize = crate::revelation::PI_LEN::; + const PI_LEN: usize = crate::revelation::pi_len::(); #[derive(Clone, Debug)] struct TestPublicInputs<'a> { diff --git 
a/verifiable-db/src/revelation/revelation_unproven_offset.rs b/verifiable-db/src/revelation/revelation_unproven_offset.rs index 15241c814..83207862c 100644 --- a/verifiable-db/src/revelation/revelation_unproven_offset.rs +++ b/verifiable-db/src/revelation/revelation_unproven_offset.rs @@ -55,19 +55,20 @@ use crate::{ api::CircuitInput as QueryCircuitInput, computational_hash_ids::{AggregationOperation, ColumnIDs, ResultIdentifier}, merkle_path::{MerklePathGadget, MerklePathTargetInputs}, + pi_len, public_inputs::PublicInputs as QueryProofPublicInputs, universal_circuit::{ build_cells_tree, universal_circuit_inputs::{BasicOperation, Placeholders, ResultStructure}, }, - PI_LEN, }, }; use super::{ + num_query_io, pi_len as revelation_pi_len, placeholders_check::{CheckPlaceholderGadget, CheckPlaceholderInputWires}, revelation_without_results_tree::CircuitBuilderParams, - PublicInputs, NUM_PREPROCESSING_IO, NUM_QUERY_IO, PI_LEN as REVELATION_PI_LEN, + PublicInputs, NUM_PREPROCESSING_IO, }; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -289,7 +290,6 @@ where { pub(crate) fn new( row_paths: [RowPath; L], - index_ids: [u64; 2], item_ids: &[F], results: [Vec; L], limit: u32, @@ -302,13 +302,9 @@ where let mut row_node_info = [NodeInfo::default(); L]; let mut index_node_info = [NodeInfo::default(); L]; for (i, row) in row_paths.into_iter().enumerate() { - row_tree_paths[i] = - MerklePathGadget::new(&row.row_tree_path, &row.row_path_siblings, index_ids[1])?; - index_tree_paths[i] = MerklePathGadget::new( - &row.index_tree_path, - &row.index_path_siblings, - index_ids[0], - )?; + row_tree_paths[i] = MerklePathGadget::new(&row.row_tree_path, &row.row_path_siblings)?; + index_tree_paths[i] = + MerklePathGadget::new(&row.index_tree_path, &row.index_path_siblings)?; row_node_info[i] = row.row_node_info; index_node_info[i] = row.index_node_info; } @@ -319,7 +315,7 @@ where format!("number of results per row is bigger than {}", S) ); let padded_ids = item_ids - .into_iter() + 
.iter() .chain(repeat(&F::default())) .take(S) .cloned() @@ -328,7 +324,7 @@ where .iter() .flat_map(|res| { assert!(res.len() >= num_actual_items_per_row); - res.into_iter() + res.iter() .cloned() .take(num_actual_items_per_row) .chain(repeat(U256::default())) @@ -388,118 +384,114 @@ where // Flag employed to enforce that the matching rows are all placed in the initial slots; // this is a requirement to ensure that the check for DISTINCT is sound let mut only_matching_rows = _true; - row_proofs - .into_iter() - .enumerate() - .for_each(|(i, row_proof)| { - let index_ids = row_proof.index_ids_target(); - let is_matching_row = b.is_equal(row_proof.num_matching_rows_target(), one); - // ensure that once `is_matching_row = false`, then it will be false for all - // subsequent iterations - only_matching_rows = b.and(only_matching_rows, is_matching_row); - b.connect(only_matching_rows.target, is_matching_row.target); - let row_node_hash = { - // if the node storing the current row is a leaf node in rows tree, then - // the hash of such node is already computed by `row_proof`; otherwise, - // we need to compute it - let inputs = row_node_info[i] - .child_hashes - .into_iter() - .flat_map(|hash| hash.to_targets()) - .chain(row_node_info[i].node_min.to_targets()) - .chain(row_node_info[i].node_max.to_targets()) - .chain(once(index_ids[1])) - .chain(row_proof.min_value_target().to_targets()) - .chain(row_proof.tree_hash_target().to_targets()) - .collect_vec(); - let row_node_hash = b.hash_n_to_hash_no_pad::(inputs); - b.select_hash( - is_row_node_leaf[i], - &row_proof.tree_hash_target(), - &row_node_hash, - ) - }; - let row_path_wires = MerklePathGadget::build(b, row_node_hash, index_ids[1]); - let row_tree_root = row_path_wires.root; - // compute hash of the index node storing the rows tree containing the current row - let index_node_hash = { - let inputs = index_node_info[i] - .child_hashes - .into_iter() - .flat_map(|hash| hash.to_targets()) - 
.chain(index_node_info[i].node_min.to_targets()) - .chain(index_node_info[i].node_max.to_targets()) - .chain(once(index_ids[0])) - .chain(row_proof.index_value_target().to_targets()) - .chain(row_tree_root.to_targets()) - .collect_vec(); - b.hash_n_to_hash_no_pad::(inputs) - }; - let index_path_wires = MerklePathGadget::build(b, index_node_hash, index_ids[0]); - // if the current row is valid, check that the root is the same of the original tree, completing - // membership proof for the current row; otherwise, we don't care - let root = b.select_hash(is_matching_row, &index_path_wires.root, &tree_hash); - b.connect_hashes(tree_hash, root); + row_proofs.iter().enumerate().for_each(|(i, row_proof)| { + let index_ids = row_proof.index_ids_target(); + let is_matching_row = b.is_equal(row_proof.num_matching_rows_target(), one); + // ensure that once `is_matching_row = false`, then it will be false for all + // subsequent iterations + only_matching_rows = b.and(only_matching_rows, is_matching_row); + b.connect(only_matching_rows.target, is_matching_row.target); + let row_node_hash = { + // if the node storing the current row is a leaf node in rows tree, then + // the hash of such node is already computed by `row_proof`; otherwise, + // we need to compute it + let inputs = row_node_info[i] + .child_hashes + .into_iter() + .flat_map(|hash| hash.to_targets()) + .chain(row_node_info[i].node_min.to_targets()) + .chain(row_node_info[i].node_max.to_targets()) + .chain(once(index_ids[1])) + .chain(row_proof.min_value_target().to_targets()) + .chain(row_proof.tree_hash_target().to_targets()) + .collect_vec(); + let row_node_hash = b.hash_n_to_hash_no_pad::(inputs); + b.select_hash( + is_row_node_leaf[i], + &row_proof.tree_hash_target(), + &row_node_hash, + ) + }; + let row_path_wires = MerklePathGadget::build(b, row_node_hash, index_ids[1]); + let row_tree_root = row_path_wires.root; + // compute hash of the index node storing the rows tree containing the current row + let 
index_node_hash = { + let inputs = index_node_info[i] + .child_hashes + .into_iter() + .flat_map(|hash| hash.to_targets()) + .chain(index_node_info[i].node_min.to_targets()) + .chain(index_node_info[i].node_max.to_targets()) + .chain(once(index_ids[0])) + .chain(row_proof.index_value_target().to_targets()) + .chain(row_tree_root.to_targets()) + .collect_vec(); + b.hash_n_to_hash_no_pad::(inputs) + }; + let index_path_wires = MerklePathGadget::build(b, index_node_hash, index_ids[0]); + // if the current row is valid, check that the root is the same of the original tree, completing + // membership proof for the current row; otherwise, we don't care + let root = b.select_hash(is_matching_row, &index_path_wires.root, &tree_hash); + b.connect_hashes(tree_hash, root); - row_paths.push(row_path_wires.inputs); - index_paths.push(index_path_wires.inputs); - // check that the primary index value for the current row is within the query - // bounds (only if the row is valid) - let index_value = row_proof.index_value_target(); - let greater_than_min = b.is_less_or_equal_than_u256(&min_query, &index_value); - let smaller_than_max = b.is_less_or_equal_than_u256(&index_value, &max_query); - let in_range = b.and(greater_than_min, smaller_than_max); - let in_range = b.and(is_matching_row, in_range); - b.connect(in_range.target, is_matching_row.target); + row_paths.push(row_path_wires.inputs); + index_paths.push(index_path_wires.inputs); + // check that the primary index value for the current row is within the query + // bounds (only if the row is valid) + let index_value = row_proof.index_value_target(); + let greater_than_min = b.is_less_or_equal_than_u256(&min_query, &index_value); + let smaller_than_max = b.is_less_or_equal_than_u256(&index_value, &max_query); + let in_range = b.and(greater_than_min, smaller_than_max); + let in_range = b.and(is_matching_row, in_range); + b.connect(in_range.target, is_matching_row.target); - // enforce DISTINCT only for actual results: we enforce 
the i-th actual result is strictly smaller - // than the (i+1)-th actual result - max_result = if let Some(res) = &max_result { - let current_result: [UInt256Target; S] = - get_result(i).to_vec().try_into().unwrap(); - let is_smaller = b.is_less_than_or_equal_to_u256_arr(res, ¤t_result).0; - // flag specifying whether we must enforce DISTINCT for the current result or not - let must_be_enforced = b.and(is_matching_row, distinct); - let is_smaller = b.and(must_be_enforced, is_smaller); - b.connect(is_smaller.target, must_be_enforced.target); - Some(current_result) - } else { - Some(get_result(i).to_vec().try_into().unwrap()) - }; + // enforce DISTINCT only for actual results: we enforce the i-th actual result is strictly smaller + // than the (i+1)-th actual result + max_result = if let Some(res) = &max_result { + let current_result: [UInt256Target; S] = get_result(i).to_vec().try_into().unwrap(); + let is_smaller = b.is_less_than_or_equal_to_u256_arr(res, ¤t_result).0; + // flag specifying whether we must enforce DISTINCT for the current result or not + let must_be_enforced = b.and(is_matching_row, distinct); + let is_smaller = b.and(must_be_enforced, is_smaller); + b.connect(is_smaller.target, must_be_enforced.target); + Some(current_result) + } else { + Some(get_result(i).to_vec().try_into().unwrap()) + }; - // Expose results for this row. - // First, we compute the digest of the results corresponding to this row, as computed in the universal - // query circuit, to check that the results correspond to the one computed by that circuit. - // To recompute the digest of the results, we first need to build the cells tree that is constructed - // in the universal query circuit to store the results computed for each row. 
Note that the - // universal query circuit stores results in a cells tree since to prove some queries a results tree - // needs to be built - let cells_tree_hash = - build_cells_tree(b, &get_result(i)[2..], &ids[2..], &is_item_included[2..]); - let second_item = b.select_u256(is_item_included[1], &get_result(i)[1], &zero_u256); - // digest = D(ids[0]||result[0]||ids[1]||second_item||cells_tree_hash) - let digest = { - let inputs = once(ids[0]) - .chain(get_result(i)[0].to_targets()) - .chain(once(ids[1])) - .chain(second_item.to_targets()) - .chain(cells_tree_hash.to_targets()) - .collect_vec(); - b.map_to_curve_point(&inputs) - }; - // we need to check that the digests are equal only if the current row is valid - let digest_equal = b.curve_eq(digest, row_proof.first_value_as_curve_target()); - let digest_equal = b.and(digest_equal, is_matching_row); - b.connect(is_matching_row.target, digest_equal.target); - num_results = b.add(num_results, is_matching_row.target); + // Expose results for this row. + // First, we compute the digest of the results corresponding to this row, as computed in the universal + // query circuit, to check that the results correspond to the one computed by that circuit. + // To recompute the digest of the results, we first need to build the cells tree that is constructed + // in the universal query circuit to store the results computed for each row. 
Note that the + // universal query circuit stores results in a cells tree since to prove some queries a results tree + // needs to be built + let cells_tree_hash = + build_cells_tree(b, &get_result(i)[2..], &ids[2..], &is_item_included[2..]); + let second_item = b.select_u256(is_item_included[1], &get_result(i)[1], &zero_u256); + // digest = D(ids[0]||result[0]||ids[1]||second_item||cells_tree_hash) + let digest = { + let inputs = once(ids[0]) + .chain(get_result(i)[0].to_targets()) + .chain(once(ids[1])) + .chain(second_item.to_targets()) + .chain(cells_tree_hash.to_targets()) + .collect_vec(); + b.map_to_curve_point(&inputs) + }; + // we need to check that the digests are equal only if the current row is valid + let digest_equal = b.curve_eq(digest, row_proof.first_value_as_curve_target()); + let digest_equal = b.and(digest_equal, is_matching_row); + b.connect(is_matching_row.target, digest_equal.target); + num_results = b.add(num_results, is_matching_row.target); - // check that placeholder hash and computational hash are the same for all - // the proofs - b.connect_hashes(row_proof.computational_hash_target(), computational_hash); - b.connect_hashes(row_proof.placeholder_hash_target(), placeholder_hash); + // check that placeholder hash and computational hash are the same for all + // the proofs + b.connect_hashes(row_proof.computational_hash_target(), computational_hash); + b.connect_hashes(row_proof.placeholder_hash_target(), placeholder_hash); - overflow = b.or(overflow, row_proof.overflow_flag_target()); - }); + overflow = b.or(overflow, row_proof.overflow_flag_target()); + }); // finally, check placeholders // First, compute the final placeholder hash, adding the primary index query bounds @@ -656,7 +648,7 @@ where [(); MAX_NUM_COLUMNS + MAX_NUM_RESULT_OPS]:, [(); 2 * (MAX_NUM_PREDICATE_OPS + MAX_NUM_RESULT_OPS)]:, [(); MAX_NUM_ITEMS_PER_OUTPUT - 1]:, - [(); PI_LEN::]:, + [(); pi_len::()]:, [(); >::HASH_SIZE]:, { // we generate a dummy proof for a dummy 
node of the index tree with an index value out of range @@ -766,21 +758,21 @@ where [(); ROW_TREE_MAX_DEPTH - 1]:, [(); INDEX_TREE_MAX_DEPTH - 1]:, [(); S * L]:, - [(); NUM_QUERY_IO::]:, + [(); num_query_io::()]:, [(); >::HASH_SIZE]:, { type CircuitBuilderParams = CircuitBuilderParams; type Inputs = RecursiveCircuitInputs; - const NUM_PUBLIC_INPUTS: usize = REVELATION_PI_LEN::; + const NUM_PUBLIC_INPUTS: usize = revelation_pi_len::(); fn circuit_logic( builder: &mut CBuilder, _verified_proofs: [&ProofWithPublicInputsTarget; 0], builder_parameters: Self::CircuitBuilderParams, ) -> Self { - let row_verifier = RecursiveCircuitsVerifierGagdet:: }>::new( + let row_verifier = RecursiveCircuitsVerifierGagdet::() }>::new( default_config(), &builder_parameters.query_circuit_set, ); @@ -798,7 +790,7 @@ where .iter() .map(|verifier| { QueryProofPublicInputs::from_slice( - verifier.get_public_input_targets:: }>(), + verifier.get_public_input_targets::() }>(), ) }) .collect_vec(); @@ -846,7 +838,7 @@ mod tests { utils::{gen_random_field_hash, gen_random_u256}, }; use plonky2::{ - field::types::{Field, PrimeField64, Sample}, + field::types::{Field, Sample}, iop::{ target::Target, witness::{PartialWitness, WitnessWrite}, @@ -865,8 +857,8 @@ mod tests { public_inputs::{PublicInputs as QueryProofPublicInputs, QueryPublicInputs}, }, revelation::{ - revelation_unproven_offset::RowPath, tests::TestPlaceholders, NUM_PREPROCESSING_IO, - NUM_QUERY_IO, + num_query_io, revelation_unproven_offset::RowPath, tests::TestPlaceholders, + NUM_PREPROCESSING_IO, }, test_utils::{random_aggregation_operations, random_aggregation_public_inputs}, }; @@ -894,7 +886,6 @@ mod tests { } impl< - 'a, const ROW_TREE_MAX_DEPTH: usize, const INDEX_TREE_MAX_DEPTH: usize, const L: usize, @@ -902,7 +893,7 @@ mod tests { const PH: usize, const PP: usize, > UserCircuit - for TestRevelationCircuit<'a, ROW_TREE_MAX_DEPTH, INDEX_TREE_MAX_DEPTH, L, S, PH, PP> + for TestRevelationCircuit<'_, ROW_TREE_MAX_DEPTH, 
INDEX_TREE_MAX_DEPTH, L, S, PH, PP> where [(); ROW_TREE_MAX_DEPTH - 1]:, [(); INDEX_TREE_MAX_DEPTH - 1]:, @@ -916,14 +907,14 @@ mod tests { fn build(c: &mut CircuitBuilder) -> Self::Wires { let row_pis_raw: [Vec; L] = (0..L) - .map(|_| c.add_virtual_targets(NUM_QUERY_IO::)) + .map(|_| c.add_virtual_targets(num_query_io::())) .collect_vec() .try_into() .unwrap(); let original_pis_raw = c.add_virtual_targets(NUM_PREPROCESSING_IO); let row_pis = row_pis_raw .iter() - .map(|pis| QueryProofPublicInputs::from_slice(&pis)) + .map(|pis| QueryProofPublicInputs::from_slice(pis)) .collect_vec() .try_into() .unwrap(); @@ -944,7 +935,7 @@ mod tests { // test function for this revelation circuit. If `distinct` is true, then the // results are enforced to be distinct - async fn test_revelation_unproven_offset_circuit(distinct: bool) { + async fn test_revelation_unproven_offset_circuit() { const ROW_TREE_MAX_DEPTH: usize = 10; const INDEX_TREE_MAX_DEPTH: usize = 10; const L: usize = 5; @@ -1105,7 +1096,7 @@ mod tests { node_5.max, ) }; - let node_B = { + let node_b = { let row_pi = QueryProofPublicInputs::<_, S>::from_slice(&row_pis[2]); let embedded_tree_hash = HashOutput::try_from(node_2.compute_node_hash(index_ids[1]).to_bytes()).unwrap(); @@ -1120,7 +1111,7 @@ mod tests { node_value, ) }; - let node_C = { + let node_c = { let row_pi = QueryProofPublicInputs::<_, S>::from_slice(&row_pis[4]); let embedded_tree_hash = HashOutput::try_from(node_3.compute_node_hash(index_ids[1]).to_bytes()).unwrap(); @@ -1137,11 +1128,11 @@ mod tests { node_value, ) }; - let node_B_hash = - HashOutput::try_from(node_B.compute_node_hash(index_ids[0]).to_bytes()).unwrap(); - let node_C_hash = - HashOutput::try_from(node_C.compute_node_hash(index_ids[0]).to_bytes()).unwrap(); - let node_A = { + let node_b_hash = + HashOutput::try_from(node_b.compute_node_hash(index_ids[0]).to_bytes()).unwrap(); + let node_c_hash = + HashOutput::try_from(node_c.compute_node_hash(index_ids[0]).to_bytes()).unwrap(); + 
let node_a = { let row_pi = QueryProofPublicInputs::<_, S>::from_slice(&row_pis[0]); let embedded_tree_hash = HashOutput::try_from(node_0.compute_node_hash(index_ids[1]).to_bytes()).unwrap(); @@ -1151,15 +1142,15 @@ mod tests { row_pis[1][index_value_range].copy_from_slice(&node_value.to_fields()); NodeInfo::new( &embedded_tree_hash, - Some(&node_B_hash), // left child is node B - Some(&node_C_hash), // right child is node C + Some(&node_b_hash), // left child is node B + Some(&node_c_hash), // right child is node C node_value, - node_B.min, - node_C.max, + node_b.min, + node_c.max, ) }; // set original tree PI to the root of the tree - let root = node_A.compute_node_hash(index_ids[0]); + let root = node_a.compute_node_hash(index_ids[0]); original_tree_pis[ORIGINAL_TREE_H_RANGE].copy_from_slice(&root.to_fields()); // sample final results and set order-agnostic digests in row_pis proofs accordingly @@ -1220,9 +1211,9 @@ mod tests { // prepare RowPath inputs for each row let row_path_1 = RowPath { row_node_info: node_1, - row_tree_path: vec![(node_0.clone(), ChildPosition::Left)], + row_tree_path: vec![(node_0, ChildPosition::Left)], row_path_siblings: vec![None], - index_node_info: node_A.clone(), + index_node_info: node_a, index_tree_path: vec![], index_path_siblings: vec![], }; @@ -1230,7 +1221,7 @@ mod tests { row_node_info: node_0, row_tree_path: vec![], row_path_siblings: vec![], - index_node_info: node_A.clone(), + index_node_info: node_a, index_tree_path: vec![], index_path_siblings: vec![], }; @@ -1238,37 +1229,31 @@ mod tests { row_node_info: node_2, row_tree_path: vec![], row_path_siblings: vec![], - index_node_info: node_B.clone(), - index_tree_path: vec![(node_A.clone(), ChildPosition::Left)], - index_path_siblings: vec![Some(node_C_hash)], + index_node_info: node_b, + index_tree_path: vec![(node_a, ChildPosition::Left)], + index_path_siblings: vec![Some(node_c_hash)], }; let row_path_4 = RowPath { row_node_info: node_4, - row_tree_path: 
vec![(node_3.clone(), ChildPosition::Left)], + row_tree_path: vec![(node_3, ChildPosition::Left)], row_path_siblings: vec![Some(node_5_hash)], - index_node_info: node_C.clone(), - index_tree_path: vec![(node_A.clone(), ChildPosition::Right)], - index_path_siblings: vec![Some(node_B_hash.clone())], + index_node_info: node_c, + index_tree_path: vec![(node_a, ChildPosition::Right)], + index_path_siblings: vec![Some(node_b_hash)], }; let row_path_3 = RowPath { row_node_info: node_3, row_tree_path: vec![], row_path_siblings: vec![], - index_node_info: node_C.clone(), - index_tree_path: vec![(node_A.clone(), ChildPosition::Right)], - index_path_siblings: vec![Some(node_B_hash)], + index_node_info: node_c, + index_tree_path: vec![(node_a, ChildPosition::Right)], + index_path_siblings: vec![Some(node_b_hash)], }; let circuit = TestRevelationCircuit:: { circuit: RevelationCircuit::new( [row_path_0, row_path_1, row_path_2, row_path_3, row_path_4], - index_ids - .into_iter() - .map(|id| id.to_canonical_u64()) - .collect_vec() - .try_into() - .unwrap(), &ids, results.map(|res| res.to_vec()), 0, @@ -1281,16 +1266,16 @@ mod tests { original_tree_pis: &original_tree_pis, }; - let proof = run_circuit::(circuit); + let _ = run_circuit::(circuit); } #[tokio::test] async fn test_revelation_unproven_offset_circuit_no_distinct() { - test_revelation_unproven_offset_circuit(false).await + test_revelation_unproven_offset_circuit().await } #[tokio::test] async fn test_revelation_unproven_offset_circuit_distinct() { - test_revelation_unproven_offset_circuit(true).await + test_revelation_unproven_offset_circuit().await } } diff --git a/verifiable-db/src/revelation/revelation_without_results_tree.rs b/verifiable-db/src/revelation/revelation_without_results_tree.rs index f041f0a02..77af8f9d7 100644 --- a/verifiable-db/src/revelation/revelation_without_results_tree.rs +++ b/verifiable-db/src/revelation/revelation_without_results_tree.rs @@ -43,8 +43,9 @@ use recursion_framework::{ use 
serde::{Deserialize, Serialize}; use super::{ + num_query_io, pi_len as revelation_pi_len, placeholders_check::{CheckPlaceholderGadget, CheckPlaceholderInputWires}, - NUM_PREPROCESSING_IO, NUM_QUERY_IO, PI_LEN as REVELATION_PI_LEN, + NUM_PREPROCESSING_IO, }; // L: maximum number of results @@ -223,24 +224,25 @@ impl CircuitLo for RecursiveCircuitWires where [(); S - 1]:, - [(); NUM_QUERY_IO::]:, + [(); num_query_io::()]:, [(); >::HASH_SIZE]:, { type CircuitBuilderParams = CircuitBuilderParams; type Inputs = RecursiveCircuitInputs; - const NUM_PUBLIC_INPUTS: usize = REVELATION_PI_LEN::; + const NUM_PUBLIC_INPUTS: usize = revelation_pi_len::(); fn circuit_logic( builder: &mut CircuitBuilder, _verified_proofs: [&ProofWithPublicInputsTarget; 0], builder_parameters: Self::CircuitBuilderParams, ) -> Self { - let query_verifier = RecursiveCircuitsVerifierGagdet:: }>::new( - default_config(), - &builder_parameters.query_circuit_set, - ); + let query_verifier = + RecursiveCircuitsVerifierGagdet::() }>::new( + default_config(), + &builder_parameters.query_circuit_set, + ); let query_verifier = query_verifier.verify_proof_in_circuit_set(builder); let preprocessing_verifier = RecursiveCircuitsVerifierGagdet::::new( @@ -252,7 +254,7 @@ where &builder_parameters.preprocessing_vk, ); let query_pi = QueryProofPublicInputs::from_slice( - query_verifier.get_public_input_targets:: }>(), + query_verifier.get_public_input_targets::() }>(), ); let preprocessing_pi = OriginalTreePublicInputs::from_slice(&preprocessing_proof.public_inputs); @@ -305,7 +307,7 @@ mod tests { // Real number of the placeholders const NUM_PLACEHOLDERS: usize = 5; - const QUERY_PI_LEN: usize = crate::query::PI_LEN::; + const QUERY_PI_LEN: usize = crate::query::pi_len::(); impl From<&TestPlaceholders> for RevelationWithoutResultsTreeCircuit { fn from(test_placeholders: &TestPlaceholders) -> Self { diff --git a/verifiable-db/src/row_tree/api.rs b/verifiable-db/src/row_tree/api.rs index 4f4bfe932..e8fa9b90c 100644 
--- a/verifiable-db/src/row_tree/api.rs +++ b/verifiable-db/src/row_tree/api.rs @@ -238,7 +238,6 @@ mod test { use super::*; use mp2_common::{ - group_hashing::{cond_field_hashed_scalar_mul, map_to_curve_point}, poseidon::{empty_poseidon_hash, H}, utils::ToFields, F, diff --git a/verifiable-db/src/row_tree/full_node.rs b/verifiable-db/src/row_tree/full_node.rs index 5a4ae96c0..b01e996b9 100644 --- a/verifiable-db/src/row_tree/full_node.rs +++ b/verifiable-db/src/row_tree/full_node.rs @@ -142,12 +142,7 @@ impl CircuitLogicWires for RecursiveFullWires { pub(crate) mod test { use alloy::primitives::U256; - use mp2_common::{ - group_hashing::{cond_field_hashed_scalar_mul, map_to_curve_point}, - poseidon::H, - utils::ToFields, - C, D, F, - }; + use mp2_common::{poseidon::H, utils::ToFields, C, D, F}; use mp2_test::{ circuit::{run_circuit, UserCircuit}, utils::weierstrass_to_point, diff --git a/verifiable-db/src/row_tree/leaf.rs b/verifiable-db/src/row_tree/leaf.rs index f28c23646..fe03ded58 100644 --- a/verifiable-db/src/row_tree/leaf.rs +++ b/verifiable-db/src/row_tree/leaf.rs @@ -131,12 +131,7 @@ impl CircuitLogicWires for RecursiveLeafWires { mod test { use alloy::primitives::U256; - use mp2_common::{ - group_hashing::{cond_field_hashed_scalar_mul, map_to_curve_point}, - poseidon::empty_poseidon_hash, - utils::ToFields, - CHasher, C, D, F, - }; + use mp2_common::{poseidon::empty_poseidon_hash, utils::ToFields, CHasher, C, D, F}; use mp2_test::circuit::{run_circuit, UserCircuit}; use plonky2::{ field::types::Sample, diff --git a/verifiable-db/src/row_tree/partial_node.rs b/verifiable-db/src/row_tree/partial_node.rs index e9056826e..b78bd47f2 100644 --- a/verifiable-db/src/row_tree/partial_node.rs +++ b/verifiable-db/src/row_tree/partial_node.rs @@ -176,12 +176,7 @@ impl CircuitLogicWires for RecursivePartialWires { #[cfg(test)] pub mod test { - use mp2_common::{ - group_hashing::{cond_field_hashed_scalar_mul, map_to_curve_point}, - poseidon::empty_poseidon_hash, - 
utils::ToFields, - CHasher, - }; + use mp2_common::{poseidon::empty_poseidon_hash, utils::ToFields, CHasher}; use plonky2::{hash::hash_types::HashOut, plonk::config::Hasher}; use plonky2_ecgfp5::curve::curve::Point; diff --git a/verifiable-db/src/row_tree/public_inputs.rs b/verifiable-db/src/row_tree/public_inputs.rs index 73f251567..2dac37010 100644 --- a/verifiable-db/src/row_tree/public_inputs.rs +++ b/verifiable-db/src/row_tree/public_inputs.rs @@ -38,7 +38,7 @@ pub struct PublicInputs<'a, T> { pub(crate) merge: &'a [T], } -impl<'a> PublicInputCommon for PublicInputs<'a, Target> { +impl PublicInputCommon for PublicInputs<'_, Target> { const RANGES: &'static [PublicInputRange] = &[H_RANGE, DR_RANGE, MIN_RANGE, MAX_RANGE, MERGE_RANGE]; diff --git a/verifiable-db/src/test_utils.rs b/verifiable-db/src/test_utils.rs index 881ba427b..02e2b5fbb 100644 --- a/verifiable-db/src/test_utils.rs +++ b/verifiable-db/src/test_utils.rs @@ -7,6 +7,7 @@ use crate::{ computational_hash_ids::{ AggregationOperation, ColumnIDs, Identifiers, Operation, PlaceholderIdentifier, }, + pi_len, public_inputs::{ PublicInputs as QueryPI, PublicInputs as QueryProofPublicInputs, PublicInputs, QueryPublicInputs, @@ -14,7 +15,6 @@ use crate::{ universal_circuit::universal_circuit_inputs::{ BasicOperation, ColumnCell, InputOperand, OutputItem, Placeholders, ResultStructure, }, - PI_LEN, }, revelation::NUM_PREPROCESSING_IO, }; @@ -115,7 +115,7 @@ pub fn random_aggregation_public_inputs( }); array::from_fn(|_| { - let mut pi = (0..PI_LEN::) + let mut pi = (0..pi_len::()) .map(|_| rng.gen()) .collect::>() .to_fields();