
Use Batching Query Public Inputs for Tabular Queries + Remove Old Circuits #417

Merged
1 change: 1 addition & 0 deletions Cargo.lock


1 change: 1 addition & 0 deletions groth16-framework/Cargo.toml
@@ -23,6 +23,7 @@ itertools.workspace = true
rand.workspace = true
serial_test.workspace = true
sha2.workspace = true
mp2_test = { path = "../mp2-test" }

recursion_framework = { path = "../recursion-framework" }
verifiable-db = { path = "../verifiable-db" }
6 changes: 5 additions & 1 deletion groth16-framework/tests/common/context.rs
@@ -3,9 +3,11 @@
use super::{NUM_PREPROCESSING_IO, NUM_QUERY_IO};
use groth16_framework::{compile_and_generate_assets, utils::clone_circuit_data};
use mp2_common::{C, D, F};
use mp2_test::circuit::TestDummyCircuit;
use recursion_framework::framework_testing::TestingRecursiveCircuits;
use verifiable_db::{
api::WrapCircuitParams,
query::pi_len,
revelation::api::Parameters as RevelationParameters,
test_utils::{
INDEX_TREE_MAX_DEPTH, MAX_NUM_COLUMNS, MAX_NUM_ITEMS_PER_OUTPUT, MAX_NUM_OUTPUTS,
@@ -40,6 +42,8 @@ impl TestContext {

// Generate a fake query circuit set.
let query_circuits = TestingRecursiveCircuits::<F, C, D, NUM_QUERY_IO>::default();
let dummy_universal_circuit =
TestDummyCircuit::<{ pi_len::<MAX_NUM_ITEMS_PER_OUTPUT>() }>::build();

// Create the revelation parameters.
let revelation_params = RevelationParameters::<
@@ -53,7 +57,7 @@
MAX_NUM_PLACEHOLDERS,
>::build(
query_circuits.get_recursive_circuit_set(), // unused, so we provide a dummy one
query_circuits.get_recursive_circuit_set(),
dummy_universal_circuit.circuit_data().verifier_data(),
preprocessing_circuits.get_recursive_circuit_set(),
preprocessing_circuits
.verifier_data_for_input_proofs::<1>()
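Per this diff, the revelation parameters are now built against the verifier data of a universal query circuit, which the test satisfies with a dummy circuit whose public-input length matches; `generate_proof` in `query.rs` below drops the duplicated query circuit set accordingly. A condensed sketch of the dummy-circuit wiring, using only identifiers that appear in this PR (the surrounding `TestContext` plumbing is elided):

```rust
use mp2_test::circuit::TestDummyCircuit;
use verifiable_db::{query::pi_len, test_utils::MAX_NUM_ITEMS_PER_OUTPUT};

fn dummy_universal_verifier_data() {
    // The dummy circuit must expose the same number of public inputs as the
    // real universal query circuit, a function of MAX_NUM_ITEMS_PER_OUTPUT.
    let dummy_universal_circuit =
        TestDummyCircuit::<{ pi_len::<MAX_NUM_ITEMS_PER_OUTPUT>() }>::build();

    // Its verifier data stands in for the universal query circuit when
    // building RevelationParameters in this test context.
    let _verifier_data = dummy_universal_circuit.circuit_data().verifier_data();
}
```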
7 changes: 1 addition & 6 deletions groth16-framework/tests/common/query.rs
@@ -56,12 +56,7 @@ impl TestContext {
.unwrap();
let revelation_proof = self
.revelation_params
.generate_proof(
input,
self.query_circuits.get_recursive_circuit_set(),
self.query_circuits.get_recursive_circuit_set(),
None,
)
.generate_proof(input, self.query_circuits.get_recursive_circuit_set(), None)
.unwrap();
let revelation_proof = ProofWithVK::deserialize(&revelation_proof).unwrap();
let (revelation_proof_with_pi, _) = revelation_proof.clone().into();
35 changes: 1 addition & 34 deletions mp2-common/src/eth.rs
@@ -286,7 +286,7 @@ mod test {
types::MAX_BLOCK_LEN,
utils::{Endianness, Packer},
};
use mp2_test::eth::{get_mainnet_url, get_sepolia_url};
use mp2_test::eth::get_sepolia_url;

#[tokio::test]
#[ignore]
@@ -426,39 +426,6 @@ mod test {
Ok(())
}

#[tokio::test]
async fn test_pidgy_pinguin_mapping_slot() -> Result<()> {
// first penguin holder https://dune.com/queries/2450476/4027653
// holder: 0x188b264aa1456b869c3a92eeed32117ebb835f47
// NFT id https://opensea.io/assets/ethereum/0xbd3531da5cf5857e7cfaa92426877b022e612cf8/1116
let mapping_value =
Address::from_str("0x188B264AA1456B869C3a92eeeD32117EbB835f47").unwrap();
let nft_id: u32 = 1116;
let mapping_key = left_pad32(&nft_id.to_be_bytes());
let url = get_mainnet_url();
let provider = ProviderBuilder::new().on_http(url.parse().unwrap());

// extracting from
// https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC721/ERC721.sol
// assuming it's using ERC721Enumerable, which inherits ERC721
let mapping_slot = 2;
// Pudgy Penguins
let pudgy_address = Address::from_str("0xBd3531dA5CF5857e7CfAA92426877b022e612cf8")?;
let query = ProofQuery::new_mapping_slot(pudgy_address, mapping_slot, mapping_key.to_vec());
let res = query
.query_mpt_proof(&provider, BlockNumberOrTag::Latest)
.await?;
let raw_address = ProofQuery::verify_storage_proof(&res)?;
// the value is actually RLP encoded !
let decoded_address: Vec<u8> = rlp::decode(&raw_address).unwrap();
let leaf_node: Vec<Vec<u8>> = rlp::decode_list(res.storage_proof[0].proof.last().unwrap());
println!("leaf_node[1].len() = {}", leaf_node[1].len());
// this is read in the same order
let found_address = Address::from_slice(&decoded_address.into_iter().collect::<Vec<u8>>());
assert_eq!(found_address, mapping_value);
Ok(())
}

#[tokio::test]
async fn test_kashish_contract_proof_query() -> Result<()> {
// https://sepolia.etherscan.io/address/0xd6a2bFb7f76cAa64Dad0d13Ed8A9EFB73398F39E#code
1 change: 0 additions & 1 deletion mp2-v1/Cargo.toml
@@ -59,4 +59,3 @@ parsil = { path = "../parsil" }

[features]
original_poseidon = ["mp2_common/original_poseidon"]
batching_circuits = ["verifiable-db/batching_circuits"]
119 changes: 64 additions & 55 deletions mp2-v1/src/query/batching_planner.rs
@@ -10,8 +10,9 @@ use ryhope::{
storage::{updatetree::UpdateTree, WideLineage},
Epoch,
};
use serde::{Deserialize, Serialize};
use verifiable_db::query::{
batching::{NodePath, RowInput, TreePathInputs},
api::{NodePath, RowInput, TreePathInputs},
computational_hash_ids::ColumnIDs,
universal_circuit::universal_circuit_inputs::{ColumnCell, RowCells},
};
@@ -118,54 +119,62 @@ async fn generate_chunks<const CHUNK_SIZE: usize, C: ContextProvider>(
.cloned()
.collect::<BTreeSet<_>>();

Ok(stream::iter(sorted_index_values.into_iter())
.then(async |index_value| {
let index_path = index_cache
.compute_path(&index_value, current_epoch)
let prove_rows = async |index_value| {
let index_path = index_cache
.compute_path(&index_value, current_epoch)
.await
.unwrap_or_else(|| panic!("node with key {index_value} not found in index tree cache"));
let proven_rows = if let Some(matching_rows) =
row_keys_by_epochs.get(&(index_value as Epoch))
{
let sorted_rows = matching_rows.iter().collect::<BTreeSet<_>>();
stream::iter(sorted_rows.iter())
.then(async |&row_key| {
compute_input_for_row(&row_cache, row_key, index_value, &index_path, column_ids)
.await
})
.collect::<Vec<RowInput>>()
.await
} else {
let proven_node = non_existence_inputs
.find_row_node_for_non_existence(index_value)
.await
.unwrap_or_else(|| {
panic!("node with key {index_value} not found in index tree cache")
.unwrap_or_else(|_| {
panic!("node for non-existence not found for index value {index_value}")
});
let proven_rows =
if let Some(matching_rows) = row_keys_by_epochs.get(&(index_value as Epoch)) {
let sorted_rows = matching_rows.iter().collect::<BTreeSet<_>>();
stream::iter(sorted_rows.iter())
.then(async |&row_key| {
compute_input_for_row(
&row_cache,
row_key,
index_value,
&index_path,
column_ids,
)
.await
})
.collect::<Vec<RowInput>>()
.await
} else {
let proven_node = non_existence_inputs
.find_row_node_for_non_existence(index_value)
.await
.unwrap_or_else(|_| {
panic!("node for non-existence not found for index value {index_value}")
});
let row_input = compute_input_for_row(
non_existence_inputs.row_tree,
&proven_node,
index_value,
&index_path,
column_ids,
)
.await;
vec![row_input]
};
proven_rows
})
.concat()
.await
let row_input = compute_input_for_row(
non_existence_inputs.row_tree,
&proven_node,
index_value,
&index_path,
column_ids,
)
.await;
vec![row_input]
};
proven_rows
};

// TODO: This implementation causes an error in DQ:
// `implementation of `std::marker::Send` is not general enough`
/*
let chunks = stream::iter(sorted_index_values.into_iter())
.then(prove_rows)
.concat()
.await
*/
let mut chunks = vec![];
for index_value in sorted_index_values {
let chunk = prove_rows(index_value).await;
chunks.extend(chunk);
}

let chunks = chunks
.chunks(CHUNK_SIZE)
.map(|chunk| chunk.to_vec())
.collect_vec())
.collect_vec();

Ok(chunks)
}
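The commented-out stream implementation is kept above because driving `prove_rows` through `stream::then(...).concat()` makes a downstream consumer (DQ) fail to compile with ``implementation of `Send` is not general enough``, so the rows are now proven with a plain sequential loop. A self-contained sketch of the workaround pattern, with a generic item type standing in for the real row inputs:

```rust
use std::future::Future;

// Await an async closure one item at a time, as generate_chunks now does.
// The loop's future is Send whenever the items, the closure, and the futures
// it returns are Send, which sidesteps the overly restrictive higher-ranked
// Send inference that the stream-combinator version can trip.
async fn prove_all<T, F, Fut>(items: Vec<T>, prove: F) -> Vec<T>
where
    F: Fn(T) -> Fut,
    Fut: Future<Output = Vec<T>>,
{
    let mut out = Vec::new();
    for item in items {
        out.extend(prove(item).await); // strictly sequential, in order
    }
    out
}
```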

/// Key for nodes of the `UTForChunks<NUM_CHUNKS>` employed to
@@ -195,8 +204,10 @@ async fn generate_chunks<const CHUNK_SIZE: usize, C: ContextProvider>(
///
/// (2,0) (2,1) (2,2) (2,3) (2,4)
/// ```
#[derive(Clone, Debug, Hash, Eq, PartialEq, Default)]
pub struct UTKey<const ARITY: usize>((usize, usize));
#[derive(
Clone, Copy, Debug, Default, PartialEq, PartialOrd, Ord, Eq, Hash, Serialize, Deserialize,
)]
pub struct UTKey<const ARITY: usize>(pub (usize, usize));
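For orientation: a `UTKey` pairs a level with a position inside that level, and the new `Copy` derive is what lets the hunks below replace `.clone()` calls on keys with plain copies. A hypothetical illustration of the coordinate scheme, not code from this crate:

```rust
// Enumerate (level, index-within-level) keys for a tree described by the
// node count at each level. One shape consistent with the doc comment's
// diagram: root (0, 0) at the top, five leaves (2, 0)..=(2, 4) at the bottom.
fn level_keys(nodes_per_level: &[usize]) -> Vec<Vec<(usize, usize)>> {
    nodes_per_level
        .iter()
        .enumerate()
        .map(|(level, &count)| (0..count).map(move |i| (level, i)).collect())
        .collect()
}

fn main() {
    let keys = level_keys(&[1, 2, 5]);
    assert_eq!(keys[0], vec![(0, 0)]);
    assert_eq!(keys[2], vec![(2, 0), (2, 1), (2, 2), (2, 3), (2, 4)]);
}
```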

impl<const ARITY: usize> UTKey<ARITY> {
/// Compute the key of the child node of `self` that has `num_left_siblings`
@@ -318,15 +329,13 @@ impl<const ARITY: usize> ProvingTree<ARITY> {
let num_childrens = parent_node.children_keys.len();
let new_child_key = parent_key.children_key(num_childrens);
let child_node = ProvingTreeNode {
parent_key: Some(parent_key.clone()),
parent_key: Some(*parent_key),
children_keys: vec![],
};
// insert new child in the set of children of the parent
parent_node.children_keys.push(new_child_key.clone());
parent_node.children_keys.push(new_child_key);
assert!(
self.nodes
.insert(new_child_key.clone(), child_node)
.is_none(),
self.nodes.insert(new_child_key, child_node).is_none(),
"Node with key {:?} already found in the tree",
new_child_key
);
@@ -339,7 +348,7 @@ impl<const ARITY: usize> ProvingTree<ARITY> {
};
let root_key = UTKey((0, 0));
assert!(
self.nodes.insert(root_key.clone(), root).is_none(),
self.nodes.insert(root_key, root).is_none(),
"Error: root node inserted multiple times"
);
root_key
@@ -412,7 +421,7 @@ impl<const ARITY: usize> ProvingTree<ARITY> {
while node_key.is_some() {
// place node key in the path
let key = node_key.unwrap();
path.push(key.clone());
path.push(*key);
// fetch key of the parent node, if any
node_key = self
.nodes
@@ -449,7 +458,7 @@ impl<const NUM_CHUNKS: usize> UTForChunksBuilder<NUM_CHUNKS> {
let path = tree.compute_path_for_leaf(node_index);
(
(
path.last().unwrap().clone(), // chunk node is always a leaf of the tree, so it is the last node
*path.last().unwrap(), // chunk node is always a leaf of the tree, so it is the last node
// in the path
chunk,
),
50 changes: 3 additions & 47 deletions mp2-v1/src/query/planner.rs
@@ -19,8 +19,8 @@ use ryhope::{
use std::{fmt::Debug, future::Future};
use tokio_postgres::{row::Row as PsqlRow, types::ToSql, NoTls};
use verifiable_db::query::{
aggregation::{ChildPosition, NodeInfo, QueryBounds},
batching::TreePathInputs,
api::TreePathInputs,
utils::{ChildPosition, NodeInfo, QueryBounds},
};

use crate::indexing::{
@@ -65,7 +65,7 @@ impl<'a, C: ContextProvider> NonExistenceInput<'a, C> {
}
}

pub(crate) async fn find_row_node_for_non_existence(
pub async fn find_row_node_for_non_existence(
&self,
primary: BlockPrimaryIndex,
) -> anyhow::Result<RowTreeKey> {
@@ -375,50 +375,6 @@ impl<
}
}

/// Returns the proving plan to prove the non existence of node of the query in this row tree at
/// the epoch primary. It also returns the leaf node chosen.
///
/// The row tree is given and specialized to psql storage since that is the only official storage
/// supported.
/// The `table_name` must be the one given to parsil settings, it is the human friendly table
/// name, i.e. the vTable name.
/// The pool is to issue specific query
/// Primary is indicating the primary index over which this row tree is looked at.
/// Settings are the parsil settings corresponding to the current SQL and current table looked at.
/// Pis contain the bounds and placeholders values.
/// TODO: we should extend ryhope to offer this API directly on the tree since it's very related.
pub async fn proving_plan_for_non_existence<C>(
row_tree: &MerkleTreeKvDb<RowTree, RowPayload<BlockPrimaryIndex>, DBRowStorage>,
table_name: String,
pool: &DBPool,
primary: BlockPrimaryIndex,
settings: &ParsilSettings<C>,
bounds: &QueryBounds,
) -> anyhow::Result<(RowTreeKey, UpdateTree<RowTreeKey>)>
where
C: ContextProvider,
{
let to_be_proven_node = {
let input = NonExistenceInput {
row_tree,
table_name,
pool,
settings,
bounds: bounds.clone(),
};
input.find_row_node_for_non_existence(primary).await
}?;

let path = row_tree
// since the epoch starts at genesis we can directly give the block number !
.lineage_at(&to_be_proven_node, primary as Epoch)
.await
.expect("node doesn't have a lineage?")
.into_full_path()
.collect_vec();
let proving_tree = UpdateTree::from_paths([path], primary as Epoch);
Ok((to_be_proven_node.clone(), proving_tree))
}
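Since `find_row_node_for_non_existence` is now `pub`, a caller that still needs the deleted helper can reproduce it inline. A sketch assembled from the deleted body above, assuming the same types are in scope and that `NonExistenceInput`'s fields are constructible from the call site:

```rust
// Equivalent of the removed proving_plan_for_non_existence, inlined at the
// call site using the now-public NonExistenceInput API.
async fn plan_for_non_existence<C: ContextProvider>(
    row_tree: &MerkleTreeKvDb<RowTree, RowPayload<BlockPrimaryIndex>, DBRowStorage>,
    table_name: String,
    pool: &DBPool,
    primary: BlockPrimaryIndex,
    settings: &ParsilSettings<C>,
    bounds: &QueryBounds,
) -> anyhow::Result<(RowTreeKey, UpdateTree<RowTreeKey>)> {
    let input = NonExistenceInput {
        row_tree,
        table_name,
        pool,
        settings,
        bounds: bounds.clone(),
    };
    let to_be_proven_node = input.find_row_node_for_non_existence(primary).await?;
    let path = row_tree
        // the epoch starts at genesis, so the block number works directly
        .lineage_at(&to_be_proven_node, primary as Epoch)
        .await
        .expect("node doesn't have a lineage?")
        .into_full_path()
        .collect_vec();
    let proving_tree = UpdateTree::from_paths([path], primary as Epoch);
    Ok((to_be_proven_node, proving_tree))
}
```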
/// Fetch a key `k` from a tree, assuming that the key is in the
/// tree. Therefore, it handles differently the case when `k` is not found:
/// - If `T::WIDE_LINEAGE` is true, then `k` might not be found because the
1 change: 0 additions & 1 deletion mp2-v1/tests/common/cases/mod.rs
@@ -11,7 +11,6 @@ use super::table::Table;

pub mod contract;
pub mod indexing;
pub mod planner;
pub mod query;
pub mod table_source;
