rm use of bn256 kzg deps
epociask committed May 28, 2024
1 parent 11dc1e2 commit 8925c41
Showing 8 changed files with 765 additions and 1,184 deletions.
1,580 changes: 587 additions & 993 deletions arbitrator/Cargo.lock

Large diffs are not rendered by default.

7 changes: 0 additions & 7 deletions arbitrator/jit/Cargo.lock

This file was deleted.

4 changes: 3 additions & 1 deletion arbitrator/jit/src/gostack.rs
@@ -28,7 +28,9 @@ impl MemoryViewContainer {
         fn closure<'a>(
             store: &'a StoreRef,
         ) -> impl (for<'b> FnOnce(&'b Memory) -> MemoryView<'b>) + 'a {
-            move |memory: &Memory| memory.view(&store)
+            move |memory: &Memory| {
+                memory.view(&store.clone())
+            }
         }
 
         let store = env.as_store_ref();
8 changes: 0 additions & 8 deletions arbitrator/prover/Cargo.toml
@@ -30,14 +30,6 @@ smallvec = { version = "1.10.0", features = ["serde"] }
 arbutil = { path = "../arbutil/" }
 c-kzg = "0.4.0" # TODO: look into switching to rust-kzg (no crates.io release or hosted rustdoc yet)
 sha2 = "0.9.9"
-ark-bn254 = "0.4.0"
-ark-std = "0.4.0"
-ark-ff = "0.4.0"
-ark-ec = "0.4.0"
-ark-serialize = "0.4.0"
-num-bigint = "0.4"
-
-kzgbn254 = { git = "https://github.com/afkbyte/rust-kzg-bn254.git", branch = "master", package = "rust-kzg-bn254" }
 
 [lib]
 name = "prover"
277 changes: 138 additions & 139 deletions arbitrator/prover/src/kzgbn254.rs
@@ -1,17 +1,17 @@

use crate::utils::Bytes32;
use ark_ec::{AffineRepr, CurveGroup,pairing::Pairing};
use kzgbn254::{
kzg::Kzg,
blob::Blob,
helpers::{remove_empty_byte_from_padded_bytes, to_fr_array}
};
// use ark_ec::{AffineRepr, CurveGroup,pairing::Pairing};
// use kzgbn254::{
// kzg::Kzg,
// blob::Blob,
// helpers::{remove_empty_byte_from_padded_bytes, to_fr_array}
// };
use eyre::{ensure, Result};
use ark_bn254::{Bn254, G1Affine, G1Projective, G2Affine};
// use ark_bn254::{Bn254, G1Affine, G1Projective, G2Affine};
use num::BigUint;
use sha2::{Digest, Sha256};
use std::{convert::TryFrom, io::Write};
use ark_serialize::CanonicalSerialize;
// use ark_serialize::CanonicalSerialize;
use num::Zero;

lazy_static::lazy_static! {
@@ -21,13 +21,13 @@ lazy_static::lazy_static! {
// srs_order = 268435456
// srs_points_to_load = 131072

pub static ref KZG: Kzg = Kzg::setup(
"./arbitrator/prover/src/test-files/g1.point",
"./arbitrator/prover/src/test-files/g2.point",
"./arbitrator/prover/src/test-files/g2.point.powerOf2",
3000,
3000
).unwrap();
// pub static ref KZG: Kzg = Kzg::setup(
// "./arbitrator/prover/src/test-files/g1.point",
// "./arbitrator/prover/src/test-files/g2.point",
// "./arbitrator/prover/src/test-files/g2.point.powerOf2",
// 3000,
// 3000
// ).unwrap();

// modulus for the underlying field F_r of the elliptic curve
// see https://docs.eigenlayer.xyz/eigenda/integrations-guides/dispersal/blob-serialization-requirements
@@ -37,135 +37,134 @@ lazy_static::lazy_static! {
pub static ref FIELD_ELEMENTS_PER_BLOB: usize = 65536;
}

/// Creates a KZG preimage proof consumable by the point evaluation precompile.
pub fn prove_kzg_preimage_bn254(
hash: Bytes32,
preimage: &[u8],
offset: u32,
out: &mut impl Write,
) -> Result<()> {

let mut kzg = KZG.clone();

// expand the roots of unity, should work as long as it's longer than chunk length and chunks
// from my understanding the data_setup_mins pads both min_chunk_len and min_num_chunks to
// the next power of 2 so we can load a max of 2048 from the test values here
// then we can take the roots of unity we actually need (len polynomial) and pass them in
// @anup, this is a really gross way to do this, pls tell better way
kzg.data_setup_mins(1, 2048)?;

// we are expecting the preimage to be unpadded when turned into a blob function so need to unpad it first
let unpadded_preimage_vec: Vec<u8> = remove_empty_byte_from_padded_bytes(preimage);
let unpadded_preimage = unpadded_preimage_vec.as_slice();

// repad it here, TODO: need to ask to change the interface for this
let blob = Blob::from_bytes_and_pad(unpadded_preimage);
let blob_polynomial = blob.to_polynomial().unwrap();
let blob_commitment = kzg.commit(&blob_polynomial).unwrap();

let mut commitment_bytes = Vec::new();
blob_commitment.serialize_uncompressed(&mut commitment_bytes).unwrap();

let mut expected_hash: Bytes32 = Sha256::digest(&*commitment_bytes).into();
expected_hash[0] = 1;

ensure!(
hash == expected_hash,
"Trying to prove versioned hash {} preimage but recomputed hash {}",
hash,
expected_hash,
);

ensure!(
offset % 32 == 0,
"Cannot prove blob preimage at unaligned offset {}",
offset,
);

let offset_usize = usize::try_from(offset)?;
let mut proving_offset = offset;

// address proving past end edge case later
let proving_past_end = offset_usize >= preimage.len();
if proving_past_end {
// Proving any offset proves the length which is all we need here,
// because we're past the end of the preimage.
proving_offset = 0;
}
// pub fn prove_kzg_preimage_bn254(
// hash: Bytes32,
// preimage: &[u8],
// offset: u32,
// out: &mut impl Write,
// ) -> Result<()> {

// let mut kzg = KZG.clone();

// // expand the roots of unity, should work as long as it's longer than chunk length and chunks
// // from my understanding the data_setup_mins pads both min_chunk_len and min_num_chunks to
// // the next power of 2 so we can load a max of 2048 from the test values here
// // then we can take the roots of unity we actually need (len polynomial) and pass them in
// // @anup, this is a really gross way to do this, pls tell better way
// kzg.data_setup_mins(1, 2048)?;

// // we are expecting the preimage to be unpadded when turned into a blob function so need to unpad it first
// let unpadded_preimage_vec: Vec<u8> = remove_empty_byte_from_padded_bytes(preimage);
// let unpadded_preimage = unpadded_preimage_vec.as_slice();

// // repad it here, TODO: need to ask to change the interface for this
// let blob = Blob::from_bytes_and_pad(unpadded_preimage);
// let blob_polynomial = blob.to_polynomial().unwrap();
// let blob_commitment = kzg.commit(&blob_polynomial).unwrap();

// let mut commitment_bytes = Vec::new();
// blob_commitment.serialize_uncompressed(&mut commitment_bytes).unwrap();

// let mut expected_hash: Bytes32 = Sha256::digest(&*commitment_bytes).into();
// expected_hash[0] = 1;

// ensure!(
// hash == expected_hash,
// "Trying to prove versioned hash {} preimage but recomputed hash {}",
// hash,
// expected_hash,
// );

// ensure!(
// offset % 32 == 0,
// "Cannot prove blob preimage at unaligned offset {}",
// offset,
// );

// let offset_usize = usize::try_from(offset)?;
// let mut proving_offset = offset;

// // address proving past end edge case later
// let proving_past_end = offset_usize >= preimage.len();
// if proving_past_end {
// // Proving any offset proves the length which is all we need here,
// // because we're past the end of the preimage.
// proving_offset = 0;
// }

let proving_offset_bytes = proving_offset.to_le_bytes();
let mut padded_proving_offset_bytes = [0u8; 32];
padded_proving_offset_bytes[32 - proving_offset_bytes.len()..].copy_from_slice(&proving_offset_bytes);
// let proving_offset_bytes = proving_offset.to_le_bytes();
// let mut padded_proving_offset_bytes = [0u8; 32];
// padded_proving_offset_bytes[32 - proving_offset_bytes.len()..].copy_from_slice(&proving_offset_bytes);

// in production we will first need to perform an IFFT on the blob data to get the expected y value
let mut proven_y = blob.get_blob_data();
let offset_usize = offset as usize; // Convert offset to usize
proven_y = proven_y[offset_usize..(offset_usize + 32)].to_vec();
// // in production we will first need to perform an IFFT on the blob data to get the expected y value
// let mut proven_y = blob.get_blob_data();
// let offset_usize = offset as usize; // Convert offset to usize
// proven_y = proven_y[offset_usize..(offset_usize + 32)].to_vec();

let proven_y_fr = to_fr_array(&proven_y);
// let proven_y_fr = to_fr_array(&proven_y);

let polynomial = blob.to_polynomial().unwrap();
// let polynomial = blob.to_polynomial().unwrap();

let g2_generator = G2Affine::generator();
let z_g2= (g2_generator * proven_y_fr[0]).into_affine();

let g2_tau: G2Affine = kzg.get_g2_points().get(1).unwrap().clone();
let g2_tau_minus_g2_z = (g2_tau - z_g2).into_affine();

// required roots of unity are the first polynomial length roots in the expanded set
let roots_of_unity = kzg.get_expanded_roots_of_unity();
let required_roots_of_unity = &roots_of_unity[0..polynomial.len()];
// TODO: ask for interface alignment later
let kzg_proof = match kzg.compute_kzg_proof(&blob_polynomial, offset as u64, &required_roots_of_unity.to_vec()) {
Ok(proof) => proof,
Err(err) => return Err(err.into()),
};

let mut kzg_proof_uncompressed_bytes = Vec::new();
kzg_proof.serialize_uncompressed(&mut kzg_proof_uncompressed_bytes).unwrap();

let xminusz_x0: BigUint = g2_tau_minus_g2_z.x.c0.into();
let xminusz_x1: BigUint = g2_tau_minus_g2_z.x.c1.into();
let xminusz_y0: BigUint = g2_tau_minus_g2_z.y.c0.into();
let xminusz_y1: BigUint = g2_tau_minus_g2_z.y.c1.into();

// turn each element of xminusz into bytes, then pad each to 32 bytes, then append in order x1,x0,y1,y0
let mut xminusz_encoded_bytes = Vec::with_capacity(128);
append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_x1);
append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_x0);
append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_y1);
append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_y0);

// encode the commitment
let commitment_x_bigint: BigUint = blob_commitment.x.into();
let commitment_y_bigint: BigUint = blob_commitment.y.into();
let mut commitment_encoded_bytes = Vec::with_capacity(32);
append_left_padded_biguint_be(&mut commitment_encoded_bytes, &commitment_x_bigint);
append_left_padded_biguint_be(&mut commitment_encoded_bytes, &commitment_y_bigint);


// encode the proof
let proof_x_bigint: BigUint = kzg_proof.x.into();
let proof_y_bigint: BigUint = kzg_proof.y.into();
let mut proof_encoded_bytes = Vec::with_capacity(64);
append_left_padded_biguint_be(&mut proof_encoded_bytes, &proof_x_bigint);
append_left_padded_biguint_be(&mut proof_encoded_bytes, &proof_y_bigint);

out.write_all(&*hash)?; // hash [:32]
out.write_all(&padded_proving_offset_bytes)?; // evaluation point [32:64]
out.write_all(&*proven_y)?; // expected output [64:96]
out.write_all(&xminusz_encoded_bytes)?; // g2TauMinusG2z [96:224]
out.write_all(&*commitment_encoded_bytes)?; // kzg commitment [224:288]
out.write_all(&proof_encoded_bytes)?; // proof [288:352]
// let g2_generator = G2Affine::generator();
// let z_g2= (g2_generator * proven_y_fr[0]).into_affine();

// let g2_tau: G2Affine = kzg.get_g2_points().get(1).unwrap().clone();
// let g2_tau_minus_g2_z = (g2_tau - z_g2).into_affine();

// // required roots of unity are the first polynomial length roots in the expanded set
// let roots_of_unity = kzg.get_expanded_roots_of_unity();
// let required_roots_of_unity = &roots_of_unity[0..polynomial.len()];
// // TODO: ask for interface alignment later
// let kzg_proof = match kzg.compute_kzg_proof(&blob_polynomial, offset as u64, &required_roots_of_unity.to_vec()) {
// Ok(proof) => proof,
// Err(err) => return Err(err.into()),
// };

// let mut kzg_proof_uncompressed_bytes = Vec::new();
// kzg_proof.serialize_uncompressed(&mut kzg_proof_uncompressed_bytes).unwrap();

// let xminusz_x0: BigUint = g2_tau_minus_g2_z.x.c0.into();
// let xminusz_x1: BigUint = g2_tau_minus_g2_z.x.c1.into();
// let xminusz_y0: BigUint = g2_tau_minus_g2_z.y.c0.into();
// let xminusz_y1: BigUint = g2_tau_minus_g2_z.y.c1.into();

// // turn each element of xminusz into bytes, then pad each to 32 bytes, then append in order x1,x0,y1,y0
// let mut xminusz_encoded_bytes = Vec::with_capacity(128);
// append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_x1);
// append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_x0);
// append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_y1);
// append_left_padded_biguint_be(&mut xminusz_encoded_bytes, &xminusz_y0);

// // encode the commitment
// let commitment_x_bigint: BigUint = blob_commitment.x.into();
// let commitment_y_bigint: BigUint = blob_commitment.y.into();
// let mut commitment_encoded_bytes = Vec::with_capacity(32);
// append_left_padded_biguint_be(&mut commitment_encoded_bytes, &commitment_x_bigint);
// append_left_padded_biguint_be(&mut commitment_encoded_bytes, &commitment_y_bigint);


// // encode the proof
// let proof_x_bigint: BigUint = kzg_proof.x.into();
// let proof_y_bigint: BigUint = kzg_proof.y.into();
// let mut proof_encoded_bytes = Vec::with_capacity(64);
// append_left_padded_biguint_be(&mut proof_encoded_bytes, &proof_x_bigint);
// append_left_padded_biguint_be(&mut proof_encoded_bytes, &proof_y_bigint);

// out.write_all(&*hash)?; // hash [:32]
// out.write_all(&padded_proving_offset_bytes)?; // evaluation point [32:64]
// out.write_all(&*proven_y)?; // expected output [64:96]
// out.write_all(&xminusz_encoded_bytes)?; // g2TauMinusG2z [96:224]
// out.write_all(&*commitment_encoded_bytes)?; // kzg commitment [224:288]
// out.write_all(&proof_encoded_bytes)?; // proof [288:352]


Ok(())
}
// Helper function to append BigUint bytes into the vector with padding; left padded big endian bytes to 32
fn append_left_padded_biguint_be(vec: &mut Vec<u8>, biguint: &BigUint) {
let bytes = biguint.to_bytes_be();
let padding = 32 - bytes.len();
vec.extend_from_slice(&vec![0; padding]);
vec.extend_from_slice(&bytes);
}
// Ok(())
// }
// // Helper function to append BigUint bytes into the vector with padding; left padded big endian bytes to 32
// fn append_left_padded_biguint_be(vec: &mut Vec<u8>, biguint: &BigUint) {
// let bytes = biguint.to_bytes_be();
// let padding = 32 - bytes.len();
// vec.extend_from_slice(&vec![0; padding]);
// vec.extend_from_slice(&bytes);
// }
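For reference, the prover code commented out above wrote a fixed 352-byte proof layout: hash [0:32], evaluation point [32:64], expected output [64:96], g2TauMinusG2z [96:224], KZG commitment [224:288], and KZG proof [288:352], with each field element encoded as 32 left-padded big-endian bytes. Below is a minimal, self-contained sketch of that padding step, lifted from the removed append_left_padded_biguint_be helper; the num-bigint import and the test value are illustrative assumptions, not part of this commit.

// Sketch of the 32-byte left-padded big-endian encoding used by the removed helper.
// Assumes the value fits in 32 bytes, as the original code does.
use num_bigint::BigUint;

fn append_left_padded_biguint_be(vec: &mut Vec<u8>, biguint: &BigUint) {
    let bytes = biguint.to_bytes_be();
    let padding = 32 - bytes.len();
    vec.extend_from_slice(&vec![0u8; padding]);
    vec.extend_from_slice(&bytes);
}

fn main() {
    // Hypothetical test value: 0x1234 should land in the last two of the 32 bytes.
    let mut out = Vec::new();
    append_left_padded_biguint_be(&mut out, &BigUint::from(0x1234u32));
    assert_eq!(out.len(), 32);
    assert_eq!(out[30..], [0x12u8, 0x34]);
}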

10 changes: 5 additions & 5 deletions arbitrator/prover/src/machine.rs
@@ -5,7 +5,7 @@ use crate::{
     binary::{parse, FloatInstruction, Local, NameCustomSection, WasmBinary},
     host,
     kzg::prove_kzg_preimage,
-    kzgbn254::prove_kzg_preimage_bn254,
+    // kzgbn254::prove_kzg_preimage_bn254,
     memory::Memory,
     merkle::{Merkle, MerkleType},
     reinterpret::{ReinterpretAsSigned, ReinterpretAsUnsigned},
@@ -2331,10 +2331,10 @@ impl Machine {
                 }
                 PreimageType::EigenDAHash => {
                     // TODO - Add eigenDA kzg preimage verification here
-                    println!("Generating proof for EigenDA preimage");
-                    prove_kzg_preimage_bn254(hash, &preimage, offset, &mut data)
-                        .expect("Failed to generate eigenDA KZG preimage proof");
-                    //data.extend(preimage);
+                    // println!("Generating proof for EigenDA preimage");
+                    // prove_kzg_preimage_bn254(hash, &preimage, offset, &mut data)
+                    //     .expect("Failed to generate eigenDA KZG preimage proof");
+                    data.extend(preimage);
                 }
             }
         } else if next_inst.opcode == Opcode::ReadInboxMessage {
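To make the net effect of this hunk concrete, here is a minimal sketch of the preimage-proof dispatch after the change. The PreimageType stand-in and the function signature are simplified assumptions rather than the prover's real definitions; only the EigenDAHash behavior is taken from the diff, which now appends the raw preimage bytes instead of generating a BN254 KZG proof.

// Simplified stand-ins for the prover's real types; only the EigenDAHash arm
// mirrors the hunk above.
#[allow(dead_code, non_camel_case_types)]
enum PreimageType {
    Keccak256,
    Sha2_256,
    EthVersionedHash,
    EigenDAHash,
}

fn append_preimage_proof(ty: PreimageType, preimage: &[u8], data: &mut Vec<u8>) {
    match ty {
        // In the real code this arm calls prove_kzg_preimage (BLS12-381 path, not sketched here).
        PreimageType::EthVersionedHash => unimplemented!("KZG blob proof path not sketched"),
        // After this commit the BN254 KZG proof generation is commented out and
        // the raw preimage bytes are emitted instead.
        PreimageType::EigenDAHash => data.extend(preimage),
        // Hash preimages are included directly, as in the unchanged arms.
        PreimageType::Keccak256 | PreimageType::Sha2_256 => data.extend(preimage),
    }
}

fn main() {
    let mut data = Vec::new();
    append_preimage_proof(PreimageType::EigenDAHash, b"eigenda blob bytes", &mut data);
    assert_eq!(data, b"eigenda blob bytes");
}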
(The remaining 2 of the 8 changed files were not loaded in this capture.)
