From dc3db99e059adc32d7f911dcc46db557e24b626e Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Tue, 17 Dec 2024 16:29:25 -0300 Subject: [PATCH 1/3] initial commit --- core/node/da_clients/Cargo.toml | 4 +- core/node/da_clients/src/eigen/verifier.rs | 100 ++++++++---------- .../da_clients/src/eigen/verifier_tests.rs | 16 +-- .../layers/da_clients/eigen.rs | 7 +- 4 files changed, 56 insertions(+), 71 deletions(-) diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index f12042b12576..efc588faa081 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -62,7 +62,9 @@ ethabi.workspace = true rust-kzg-bn254.workspace = true ark-bn254.workspace = true num-bigint.workspace = true -serial_test.workspace = true zksync_web3_decl.workspace = true zksync_eth_client.workspace = true url.workspace = true + +[dev-dependencies] +serial_test.workspace = true diff --git a/core/node/da_clients/src/eigen/verifier.rs b/core/node/da_clients/src/eigen/verifier.rs index 4a314a751b87..385c536e8dd3 100644 --- a/core/node/da_clients/src/eigen/verifier.rs +++ b/core/node/da_clients/src/eigen/verifier.rs @@ -185,9 +185,9 @@ impl Verifier { Ok(()) } - pub fn hash_encode_blob_header(&self, blob_header: BlobHeader) -> Vec { + pub fn hash_encode_blob_header(&self, blob_header: &BlobHeader) -> Vec { let mut blob_quorums = vec![]; - for quorum in blob_header.blob_quorum_params { + for quorum in &blob_header.blob_quorum_params { let quorum = Token::Tuple(vec![ Token::Uint(ethabi::Uint::from(quorum.quorum_number)), Token::Uint(ethabi::Uint::from(quorum.adversary_threshold_percentage)), @@ -226,20 +226,16 @@ impl Verifier { } let mut computed_hash = leaf.to_vec(); for i in 0..proof.len() / 32 { - let mut combined = proof[i * 32..(i + 1) * 32] - .iter() - .chain(computed_hash.iter()) - .cloned() - .collect::>(); + let mut buffer = [0u8; 64]; if index % 2 == 0 { - combined = computed_hash - .iter() - .chain(proof[i * 32..(i + 1) * 32].iter()) - .cloned() - .collect::>(); - }; + buffer[..32].copy_from_slice(&computed_hash); + buffer[32..].copy_from_slice(&proof[i * 32..(i + 1) * 32]); + } else { + buffer[..32].copy_from_slice(&proof[i * 32..(i + 1) * 32]); + buffer[32..].copy_from_slice(&computed_hash); + } let mut keccak = Keccak::v256(); - keccak.update(&combined); + keccak.update(&buffer); let mut hash = [0u8; 32]; keccak.finalize(&mut hash); computed_hash = hash.to_vec(); @@ -250,15 +246,15 @@ impl Verifier { } /// Verifies the certificate's batch root - pub fn verify_merkle_proof(&self, cert: BlobInfo) -> Result<(), VerificationError> { - let inclusion_proof = cert.blob_verification_proof.inclusion_proof; - let root = cert + pub fn verify_merkle_proof(&self, cert: &BlobInfo) -> Result<(), VerificationError> { + let inclusion_proof = &cert.blob_verification_proof.inclusion_proof; + let root = &cert .blob_verification_proof .batch_medatada .batch_header .batch_root; let blob_index = cert.blob_verification_proof.blob_index; - let blob_header = cert.blob_header; + let blob_header = &cert.blob_header; let blob_header_hash = self.hash_encode_blob_header(blob_header); let mut keccak = Keccak::v256(); @@ -267,9 +263,9 @@ impl Verifier { keccak.finalize(&mut leaf_hash); let generated_root = - self.process_inclusion_proof(&inclusion_proof, &leaf_hash, blob_index)?; + self.process_inclusion_proof(inclusion_proof, &leaf_hash, blob_index)?; - if generated_root != root { + if generated_root != *root { return Err(VerificationError::DifferentRoots); } Ok(()) @@ -277,14 +273,14 @@ impl 
Verifier { fn hash_batch_metadata( &self, - batch_header: BatchHeader, - signatory_record_hash: Vec, + batch_header: &BatchHeader, + signatory_record_hash: &[u8], confirmation_block_number: u32, ) -> Vec { let batch_header_token = Token::Tuple(vec![ - Token::FixedBytes(batch_header.batch_root), - Token::Bytes(batch_header.quorum_numbers), - Token::Bytes(batch_header.quorum_signed_percentages), + Token::FixedBytes(batch_header.batch_root.clone()), // Clone only where necessary + Token::Bytes(batch_header.quorum_numbers.clone()), + Token::Bytes(batch_header.quorum_signed_percentages.clone()), Token::Uint(ethabi::Uint::from(batch_header.reference_block_number)), ]); @@ -297,7 +293,7 @@ impl Verifier { let hash_token = Token::Tuple(vec![ Token::FixedBytes(header_hash.to_vec()), - Token::FixedBytes(signatory_record_hash), + Token::FixedBytes(signatory_record_hash.to_owned()), // Clone only if required ]); let mut hash_encoded = encode(&[hash_token]); @@ -330,7 +326,7 @@ impl Verifier { async fn call_batch_id_to_metadata_hash( &self, - blob_info: BlobInfo, + blob_info: &BlobInfo, ) -> Result, VerificationError> { let context_block = self.get_context_block().await?; @@ -364,21 +360,19 @@ impl Verifier { } /// Verifies the certificate batch hash - pub async fn verify_batch(&self, blob_info: BlobInfo) -> Result<(), VerificationError> { - let expected_hash = self - .call_batch_id_to_metadata_hash(blob_info.clone()) - .await?; + pub async fn verify_batch(&self, blob_info: &BlobInfo) -> Result<(), VerificationError> { + let expected_hash = self.call_batch_id_to_metadata_hash(blob_info).await?; if expected_hash == vec![0u8; 32] { return Err(VerificationError::EmptyHash); } let actual_hash = self.hash_batch_metadata( - blob_info + &blob_info .blob_verification_proof .batch_medatada .batch_header, - blob_info + &blob_info .blob_verification_proof .batch_medatada .signatory_record_hash, @@ -401,15 +395,11 @@ impl Verifier { } // Read the offset (first 32 bytes) - let offset = { - let mut offset_bytes = [0u8; 32]; - offset_bytes.copy_from_slice(&encoded[0..32]); - usize::from_be_bytes( - offset_bytes[24..32] - .try_into() - .map_err(|_| "Offset is too large")?, - ) - }; + let offset = usize::from_be_bytes( + encoded[24..32] + .try_into() + .map_err(|_| "Offset is too large")?, + ); // Check if offset is valid if offset + 32 > encoded.len() { @@ -417,15 +407,11 @@ impl Verifier { } // Read the length (32 bytes at the offset position) - let length = { - let mut length_bytes = [0u8; 32]; - length_bytes.copy_from_slice(&encoded[offset..offset + 32]); - usize::from_be_bytes( - length_bytes[24..32] - .try_into() - .map_err(|_| "Offset is too large")?, - ) - }; + let length = usize::from_be_bytes( + encoded[offset + 24..offset + 32] + .try_into() + .map_err(|_| "Length is too large")?, + ); // Check if the length is valid if offset + 32 + length > encoded.len() { @@ -494,9 +480,9 @@ impl Verifier { } /// Verifies that the certificate's blob quorum params are correct - pub async fn verify_security_params(&self, cert: BlobInfo) -> Result<(), VerificationError> { - let blob_header = cert.blob_header; - let batch_header = cert.blob_verification_proof.batch_medatada.batch_header; + pub async fn verify_security_params(&self, cert: &BlobInfo) -> Result<(), VerificationError> { + let blob_header = &cert.blob_header; + let batch_header = &cert.blob_verification_proof.batch_medatada.batch_header; let mut confirmed_quorums: HashMap = HashMap::new(); for i in 0..blob_header.blob_quorum_params.len() { @@ -545,9 +531,9 @@ impl 
Verifier { &self, cert: BlobInfo, ) -> Result<(), VerificationError> { - self.verify_batch(cert.clone()).await?; - self.verify_merkle_proof(cert.clone())?; - self.verify_security_params(cert.clone()).await?; + self.verify_batch(&cert).await?; + self.verify_merkle_proof(&cert)?; + self.verify_security_params(&cert).await?; Ok(()) } } diff --git a/core/node/da_clients/src/eigen/verifier_tests.rs b/core/node/da_clients/src/eigen/verifier_tests.rs index bdea8f9a9960..9875030a4b2d 100644 --- a/core/node/da_clients/src/eigen/verifier_tests.rs +++ b/core/node/da_clients/src/eigen/verifier_tests.rs @@ -212,7 +212,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_merkle_proof(cert); + let result = verifier.verify_merkle_proof(&cert); assert!(result.is_ok()); } @@ -295,7 +295,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_merkle_proof(cert); + let result = verifier.verify_merkle_proof(&cert); assert!(result.is_ok()); } @@ -332,7 +332,7 @@ mod test { }, ], }; - let result = verifier.hash_encode_blob_header(blob_header); + let result = verifier.hash_encode_blob_header(&blob_header); let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; assert_eq!(result, hex::decode(expected).unwrap()); } @@ -371,7 +371,7 @@ mod test { }, ], }; - let result = verifier.hash_encode_blob_header(blob_header); + let result = verifier.hash_encode_blob_header(&blob_header); let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; assert_eq!(result, hex::decode(expected).unwrap()); } @@ -495,7 +495,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_batch(cert).await; + let result = verifier.verify_batch(&cert).await; assert!(result.is_ok()); } @@ -603,7 +603,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_batch(cert).await; + let result = verifier.verify_batch(&cert).await; assert!(result.is_ok()); } @@ -685,7 +685,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_security_params(cert).await; + let result = verifier.verify_security_params(&cert).await; assert!(result.is_ok()); } @@ -810,7 +810,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_security_params(cert).await; + let result = verifier.verify_security_params(&cert).await; assert!(result.is_ok()); } } diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs index 02e3d5b2f8fd..96874a6ab4a2 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -66,15 +66,12 @@ pub struct GetBlobFromDB { #[async_trait::async_trait] impl GetBlobData for GetBlobFromDB { - async fn call(&self, input: &'_ str) -> anyhow::Result>> { - let pool = self.pool.clone(); - let input = input.to_string(); - let mut conn = pool.connection_tagged("eigen_client").await?; + async fn call(&self, input: &str) -> anyhow::Result>> { + let mut conn = self.pool.connection_tagged("eigen_client").await?; let batch = conn .data_availability_dal() .get_blob_data_by_blob_id(&input) .await?; - drop(conn); Ok(batch.map(|b| b.pubdata)) } } From 4fc871a8154a40e3a7baea3e727b755c051b4347 Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Thu, 19 Dec 2024 12:20:18 -0300 Subject: [PATCH 2/3] clippy suggestion --- .../src/implementations/layers/da_clients/eigen.rs | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs index ee5755d85c14..e24ff7ef65b4 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -70,7 +70,7 @@ impl GetBlobData for GetBlobFromDB { let mut conn = self.pool.connection_tagged("eigen_client").await?; let batch = conn .data_availability_dal() - .get_blob_data_by_blob_id(&input) + .get_blob_data_by_blob_id(input) .await?; Ok(batch.map(|b| b.pubdata)) } From 296c0156854cfa2daff1c65cda2fa279f67f1dd2 Mon Sep 17 00:00:00 2001 From: juan518munoz <62400508+juan518munoz@users.noreply.github.com> Date: Thu, 19 Dec 2024 13:08:12 -0300 Subject: [PATCH 3/3] feat(eigen-client-extra-features): address PR comments (part 3) (#376) * use keccak256 fn * simplify get_context_block * use saturating sub * feat(eigen-client-extra-features): address PR comments (part 4) (#378) * Replace decode bytes for ethabi * Add default to eigenconfig * Change str to url * Add index to data availability table * Address comments * Change error to verificationerror * Format code * feat(eigen-client-extra-features): address PR comments (part 5) (#377) * use trait object * prevent blocking non async code * clippy suggestion --------- Co-authored-by: juan518munoz <62400508+juan518munoz@users.noreply.github.com> --------- Co-authored-by: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> --- .../lib/config/src/configs/da_client/eigen.rs | 18 ++- ...2144402_create_index_data_availability.sql | 1 + core/node/da_clients/src/eigen/client.rs | 16 ++- .../node/da_clients/src/eigen/client_tests.rs | 10 +- core/node/da_clients/src/eigen/sdk.rs | 27 +++- core/node/da_clients/src/eigen/verifier.rs | 135 +++++++----------- .../da_clients/src/eigen/verifier_tests.rs | 5 +- .../layers/da_clients/eigen.rs | 4 + 8 files changed, 109 insertions(+), 107 deletions(-) create mode 100644 core/lib/dal/migrations/20241812144402_create_index_data_availability.sql diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs index cd036e407427..b7723e2271a6 100644 --- a/core/lib/config/src/configs/da_client/eigen.rs +++ b/core/lib/config/src/configs/da_client/eigen.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use zksync_basic_types::secrets::PrivateKey; /// Configuration for the EigenDA remote disperser client. 
-#[derive(Clone, Debug, PartialEq, Deserialize, Default)] +#[derive(Clone, Debug, PartialEq, Deserialize)] pub struct EigenConfig { /// URL of the Disperser RPC server pub disperser_rpc: String, @@ -24,6 +24,22 @@ pub struct EigenConfig { pub chain_id: u64, } +impl Default for EigenConfig { + fn default() -> Self { + Self { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + wait_for_finalization: false, + authenticated: false, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + chain_id: 19000, + } + } +} + #[derive(Clone, Debug, PartialEq)] pub struct EigenSecrets { pub private_key: PrivateKey, diff --git a/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql new file mode 100644 index 000000000000..938d2e09de44 --- /dev/null +++ b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql @@ -0,0 +1 @@ +CREATE INDEX idx_blob_id_l1_batch_number ON data_availability (blob_id, l1_batch_number); diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs index ef4baa1c2ad0..5baee9475e92 100644 --- a/core/node/da_clients/src/eigen/client.rs +++ b/core/node/da_clients/src/eigen/client.rs @@ -13,21 +13,23 @@ use super::sdk::RawEigenClient; use crate::utils::to_retriable_da_error; #[async_trait] -pub trait GetBlobData: Clone + std::fmt::Debug + Send + Sync { +pub trait GetBlobData: std::fmt::Debug + Send + Sync { async fn get_blob_data(&self, input: &str) -> anyhow::Result>>; + + fn clone_boxed(&self) -> Box; } /// EigenClient is a client for the Eigen DA service. 
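
A side note on the `EigenConfig` hunk above: a derived `Default` would have produced empty strings and zeroes, so the patch spells out working Holesky defaults instead. A minimal, self-contained sketch of that pattern, trimmed to two fields (the real struct has nine):

```rust
#[derive(Clone, Debug, PartialEq)]
struct EigenConfig {
    disperser_rpc: String,
    chain_id: u64,
}

// Hand-written Default encoding usable Holesky values, mirroring the hunk above.
impl Default for EigenConfig {
    fn default() -> Self {
        Self {
            disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(),
            chain_id: 19000,
        }
    }
}

fn main() {
    // Struct-update syntax: override one field, inherit the rest.
    let cfg = EigenConfig {
        chain_id: 17000,
        ..EigenConfig::default()
    };
    assert_eq!(cfg.disperser_rpc, "https://disperser-holesky.eigenda.xyz:443");
    assert_eq!(cfg.chain_id, 17000);
}
```
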
#[derive(Debug, Clone)] -pub struct EigenClient { - pub(crate) client: Arc>, +pub struct EigenClient { + pub(crate) client: Arc, } -impl EigenClient { +impl EigenClient { pub async fn new( config: EigenConfig, secrets: EigenSecrets, - get_blob_data: Box, + get_blob_data: Box, ) -> anyhow::Result { let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret().as_str()) .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?; @@ -40,7 +42,7 @@ impl EigenClient { } #[async_trait] -impl DataAvailabilityClient for EigenClient { +impl DataAvailabilityClient for EigenClient { async fn dispatch_blob( &self, _: u32, // batch number @@ -75,6 +77,6 @@ impl DataAvailabilityClient for EigenClient { } fn blob_size_limit(&self) -> Option { - Some(RawEigenClient::::blob_size_limit()) + Some(RawEigenClient::blob_size_limit()) } } diff --git a/core/node/da_clients/src/eigen/client_tests.rs b/core/node/da_clients/src/eigen/client_tests.rs index 99cd81cf2797..4fbbd5c36761 100644 --- a/core/node/da_clients/src/eigen/client_tests.rs +++ b/core/node/da_clients/src/eigen/client_tests.rs @@ -17,7 +17,7 @@ mod tests { use crate::eigen::{blob_info::BlobInfo, EigenClient, GetBlobData}; - impl EigenClient { + impl EigenClient { pub async fn get_blob_data( &self, blob_id: BlobInfo, @@ -32,8 +32,8 @@ mod tests { const STATUS_QUERY_TIMEOUT: u64 = 1800000; // 30 minutes const STATUS_QUERY_INTERVAL: u64 = 5; // 5 ms - async fn get_blob_info( - client: &EigenClient, + async fn get_blob_info( + client: &EigenClient, result: &DispatchResponse, ) -> anyhow::Result { let blob_info = (|| async { @@ -62,6 +62,10 @@ mod tests { async fn get_blob_data(&self, _input: &'_ str) -> anyhow::Result>> { Ok(None) } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } } #[ignore = "depends on external RPC"] diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs index c1e52e42ffc9..3a3b1202690c 100644 --- a/core/node/da_clients/src/eigen/sdk.rs +++ b/core/node/da_clients/src/eigen/sdk.rs @@ -7,6 +7,7 @@ use tonic::{ transport::{Channel, ClientTlsConfig, Endpoint}, Streaming, }; +use url::Url; use zksync_config::EigenConfig; use zksync_da_client::types::DAError; use zksync_eth_client::clients::PKSigningClient; @@ -30,24 +31,36 @@ use crate::eigen::{ verifier::VerificationError, }; -#[derive(Debug, Clone)] -pub(crate) struct RawEigenClient { +#[derive(Debug)] +pub(crate) struct RawEigenClient { client: Arc>>, private_key: SecretKey, pub config: EigenConfig, verifier: Verifier, - get_blob_data: Box, + get_blob_data: Box, +} + +impl Clone for RawEigenClient { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + private_key: self.private_key, + config: self.config.clone(), + verifier: self.verifier.clone(), + get_blob_data: self.get_blob_data.clone_boxed(), + } + } } pub(crate) const DATA_CHUNK_SIZE: usize = 32; -impl RawEigenClient { +impl RawEigenClient { const BLOB_SIZE_LIMIT: usize = 1024 * 1024 * 2; // 2 MB pub async fn new( private_key: SecretKey, config: EigenConfig, - get_blob_data: Box, + get_blob_data: Box, ) -> anyhow::Result { let endpoint = Endpoint::from_str(config.disperser_rpc.as_str())?.tls_config(ClientTlsConfig::new())?; @@ -60,8 +73,8 @@ impl RawEigenClient { .ok_or(anyhow::anyhow!("EigenDA ETH RPC not set"))?, svc_manager_addr: Address::from_str(&config.eigenda_svc_manager_address)?, max_blob_size: Self::BLOB_SIZE_LIMIT as u32, - g1_url: config.g1_url.clone(), - g2_url: config.g2_url.clone(), + g1_url: Url::parse(&config.g1_url)?, + 
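
The `clone_boxed` method and the hand-written `Clone` for `RawEigenClient` above are the standard object-safe cloning workaround: `Clone::clone` returns `Self` where `Self: Sized`, so `Box<dyn GetBlobData>` cannot simply require `Clone`. A minimal sketch of the pattern with a toy implementor (not the real types):

```rust
use std::fmt::Debug;

trait GetBlobData: Debug + Send + Sync {
    fn clone_boxed(&self) -> Box<dyn GetBlobData>;
}

#[derive(Clone, Debug)]
struct NoData;

impl GetBlobData for NoData {
    fn clone_boxed(&self) -> Box<dyn GetBlobData> {
        Box::new(self.clone()) // delegate to the concrete type's Clone
    }
}

// A holder of the trait object clones through the vtable, which is exactly
// what the manual `impl Clone for RawEigenClient` does with get_blob_data.
#[derive(Debug)]
struct Holder {
    get_blob_data: Box<dyn GetBlobData>,
}

impl Clone for Holder {
    fn clone(&self) -> Self {
        Self {
            get_blob_data: self.get_blob_data.clone_boxed(),
        }
    }
}

fn main() {
    let a = Holder { get_blob_data: Box::new(NoData) };
    let b = a.clone();
    println!("{:?} {:?}", a, b);
}
```
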
g2_url: Url::parse(&config.g2_url)?, settlement_layer_confirmation_depth: config.settlement_layer_confirmation_depth, private_key: hex::encode(private_key.secret_bytes()), chain_id: config.chain_id, diff --git a/core/node/da_clients/src/eigen/verifier.rs b/core/node/da_clients/src/eigen/verifier.rs index bfb68f6d0068..b6d196df47bc 100644 --- a/core/node/da_clients/src/eigen/verifier.rs +++ b/core/node/da_clients/src/eigen/verifier.rs @@ -1,9 +1,9 @@ -use std::{collections::HashMap, fs::File, io::copy, path::Path}; +use std::{collections::HashMap, path::Path}; use ark_bn254::{Fq, G1Affine}; use ethabi::{encode, ParamType, Token}; use rust_kzg_bn254::{blob::Blob, kzg::Kzg, polynomial::PolynomialFormat}; -use tiny_keccak::{Hasher, Keccak}; +use tokio::{fs::File, io::AsyncWriteExt}; use url::Url; use zksync_basic_types::web3::CallRequest; use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult}; @@ -70,8 +70,8 @@ pub struct VerifierConfig { pub rpc_url: String, pub svc_manager_addr: Address, pub max_blob_size: u32, - pub g1_url: String, - pub g2_url: String, + pub g1_url: Url, + pub g2_url: Url, pub settlement_layer_confirmation_depth: u32, pub private_key: String, pub chain_id: u64, @@ -104,8 +104,7 @@ impl Verifier { pub const G2POINT: &'static str = "g2.point.powerOf2"; pub const POINT_SIZE: u32 = 32; - async fn save_point(url: String, point: String) -> Result<(), VerificationError> { - let url = Url::parse(&url).map_err(|_| VerificationError::LinkError)?; + async fn save_point(url: Url, point: String) -> Result<(), VerificationError> { let response = reqwest::get(url) .await .map_err(|_| VerificationError::LinkError)?; @@ -114,17 +113,19 @@ impl Verifier { } let path = format!("./{}", point); let path = Path::new(&path); - let mut file = File::create(path).map_err(|_| VerificationError::LinkError)?; + let mut file = File::create(path).await.map_err(|_| VerificationError::LinkError)?; let content = response .bytes() .await .map_err(|_| VerificationError::LinkError)?; - copy(&mut content.as_ref(), &mut file).map_err(|_| VerificationError::LinkError)?; + file.write_all(&content) + .await + .map_err(|_| VerificationError::LinkError)?; Ok(()) } - async fn save_points(url_g1: String, url_g2: String) -> Result { - Self::save_point(url_g1.clone(), Self::G1POINT.to_string()).await?; - Self::save_point(url_g2.clone(), Self::G2POINT.to_string()).await?; + async fn save_points(url_g1: Url, url_g2: Url) -> Result { + Self::save_point(url_g1, Self::G1POINT.to_string()).await?; + Self::save_point(url_g2, Self::G2POINT.to_string()).await?; Ok(".".to_string()) } @@ -134,15 +135,20 @@ impl Verifier { ) -> Result { let srs_points_to_load = cfg.max_blob_size / Self::POINT_SIZE; let path = Self::save_points(cfg.clone().g1_url, cfg.clone().g2_url).await?; - let kzg = Kzg::setup( - &format!("{}/{}", path, Self::G1POINT), - "", - &format!("{}/{}", path, Self::G2POINT), - Self::SRSORDER, - srs_points_to_load, - "".to_string(), - ); - let kzg = kzg.map_err(|e| { + let kzg_handle = tokio::task::spawn_blocking(move || { + Kzg::setup( + &format!("{}/{}", path, Self::G1POINT), + "", + &format!("{}/{}", path, Self::G2POINT), + Self::SRSORDER, + srs_points_to_load, + "".to_string(), + ) + }); + let kzg = kzg_handle.await.map_err(|e| { + tracing::error!("Failed to setup KZG: {:?}", e); + VerificationError::KzgError + })?.map_err(|e| { tracing::error!("Failed to setup KZG: {:?}", e); VerificationError::KzgError })?; @@ -206,12 +212,7 @@ impl Verifier { ]); let encoded = encode(&[blob_header]); - - let mut 
keccak = Keccak::v256(); - keccak.update(&encoded); - let mut hash = [0u8; 32]; - keccak.finalize(&mut hash); - hash.to_vec() + web3::keccak256(&encoded).to_vec() } pub fn process_inclusion_proof( @@ -234,11 +235,7 @@ impl Verifier { buffer[..32].copy_from_slice(&proof[i * 32..(i + 1) * 32]); buffer[32..].copy_from_slice(&computed_hash); } - let mut keccak = Keccak::v256(); - keccak.update(&buffer); - let mut hash = [0u8; 32]; - keccak.finalize(&mut hash); - computed_hash = hash.to_vec(); + computed_hash = web3::keccak256(&buffer).to_vec(); index /= 2; } @@ -257,10 +254,7 @@ impl Verifier { let blob_header = &cert.blob_header; let blob_header_hash = self.hash_encode_blob_header(blob_header); - let mut keccak = Keccak::v256(); - keccak.update(&blob_header_hash); - let mut leaf_hash = [0u8; 32]; - keccak.finalize(&mut leaf_hash); + let leaf_hash = web3::keccak256(&blob_header_hash).to_vec(); let generated_root = self.process_inclusion_proof(inclusion_proof, &leaf_hash, blob_index)?; @@ -285,11 +279,7 @@ impl Verifier { ]); let encoded = encode(&[batch_header_token]); - - let mut keccak = Keccak::v256(); - keccak.update(&encoded); - let mut header_hash = [0u8; 32]; - keccak.finalize(&mut header_hash); + let header_hash = web3::keccak256(&encoded).to_vec(); let hash_token = Token::Tuple(vec![ Token::FixedBytes(header_hash.to_vec()), @@ -299,13 +289,7 @@ impl Verifier { let mut hash_encoded = encode(&[hash_token]); hash_encoded.append(&mut confirmation_block_number.to_be_bytes().to_vec()); - - let mut keccak = Keccak::v256(); - keccak.update(&hash_encoded); - let mut hash = [0u8; 32]; - keccak.finalize(&mut hash); - - hash.to_vec() + web3::keccak256(&hash_encoded).to_vec() } /// Retrieves the block to make the request to the service manager @@ -318,10 +302,12 @@ impl Verifier { .map_err(|_| VerificationError::ServiceManagerError)? 
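
Two patterns in the hunks above are worth spelling out. First, `Kzg::setup` is CPU-bound (it loads and processes the SRS points), so it moves onto tokio's blocking pool, and its result is unwrapped twice: once for the join error (the task panicked or was cancelled) and once for the setup's own error. A sketch of that shape with a stand-in computation and toy error strings:

```rust
use tokio::task;

// Stand-in for the CPU-bound Kzg::setup call.
fn expensive_setup(points: u64) -> Result<u64, String> {
    Ok((0..points).sum())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let handle = task::spawn_blocking(move || expensive_setup(1 << 16));
    let loaded = handle
        .await
        .map_err(|e| format!("setup task failed to run: {e}"))? // JoinError
        .map_err(|e| format!("setup returned an error: {e}"))?; // inner error
    println!("loaded {loaded}");
    Ok(())
}
```

Second, the four-line tiny_keccak dance collapses into one-shot `web3::keccak256` calls. Assuming that helper is plain Keccak-256 (bytes in, 32-byte digest out), the refactored inclusion-proof walk is equivalent to the standalone sketch below; tiny_keccak is used here only to keep the sketch runnable outside the zksync crates:

```rust
use tiny_keccak::{Hasher, Keccak};

/// One-shot Keccak-256, the shape of the `web3::keccak256` helper.
fn keccak256(bytes: &[u8]) -> [u8; 32] {
    let mut keccak = Keccak::v256();
    keccak.update(bytes);
    let mut hash = [0u8; 32];
    keccak.finalize(&mut hash);
    hash
}

/// Same logic as `process_inclusion_proof`: each 32-byte slice of `proof`
/// is a sibling, and the parity of `index` decides whether the running
/// hash is the left or the right child of the next node.
fn fold_proof(proof: &[u8], leaf: [u8; 32], mut index: u32) -> [u8; 32] {
    let mut computed = leaf;
    for sibling in proof.chunks_exact(32) {
        let mut buffer = [0u8; 64];
        if index % 2 == 0 {
            buffer[..32].copy_from_slice(&computed);
            buffer[32..].copy_from_slice(sibling);
        } else {
            buffer[..32].copy_from_slice(sibling);
            buffer[32..].copy_from_slice(&computed);
        }
        computed = keccak256(&buffer);
        index /= 2;
    }
    computed
}

fn main() {
    // Two-leaf tree: the root is keccak(leaf0 || leaf1).
    let leaf0 = keccak256(b"blob header 0");
    let leaf1 = keccak256(b"blob header 1");
    let mut pair = [0u8; 64];
    pair[..32].copy_from_slice(&leaf0);
    pair[32..].copy_from_slice(&leaf1);
    let root = keccak256(&pair);

    // leaf0 sits at index 0 (sibling on the right), leaf1 at index 1.
    assert_eq!(fold_proof(&leaf1, leaf0, 0), root);
    assert_eq!(fold_proof(&leaf0, leaf1, 1), root);
}
```
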
.as_u64(); - if self.cfg.settlement_layer_confirmation_depth == 0 { - return Ok(latest); - } - Ok(latest - (self.cfg.settlement_layer_confirmation_depth as u64 - 1)) + let depth = self + .cfg + .settlement_layer_confirmation_depth + .saturating_sub(1); + let block_to_return = latest.saturating_sub(depth as u64); + Ok(block_to_return) } async fn call_batch_id_to_metadata_hash( @@ -385,39 +371,17 @@ impl Verifier { Ok(()) } - fn decode_bytes(&self, encoded: Vec) -> Result, String> { - // Ensure the input has at least 64 bytes (offset + length) - if encoded.len() < 64 { - return Err("Encoded data is too short".to_string()); - } - - // Read the offset (first 32 bytes) - let offset = usize::from_be_bytes( - encoded[24..32] - .try_into() - .map_err(|_| "Offset is too large")?, - ); - - // Check if offset is valid - if offset + 32 > encoded.len() { - return Err("Offset points outside the encoded data".to_string()); - } - - // Read the length (32 bytes at the offset position) - let length = usize::from_be_bytes( - encoded[offset + 24..offset + 32] - .try_into() - .map_err(|_| "Length is too large")?, - ); - - // Check if the length is valid - if offset + 32 + length > encoded.len() { - return Err("Length extends beyond the encoded data".to_string()); + fn decode_bytes(&self, encoded: Vec) -> Result, VerificationError> { + let output_type = [ParamType::Bytes]; + let tokens: Vec = ethabi::decode(&output_type, &encoded) + .map_err(|_| VerificationError::ServiceManagerError)?; + let token = tokens + .first() + .ok_or(VerificationError::ServiceManagerError)?; + match token { + Token::Bytes(data) => Ok(data.to_vec()), + _ => Err(VerificationError::ServiceManagerError), } - - // Extract the bytes data - let data = encoded[offset + 32..offset + 32 + length].to_vec(); - Ok(data) } async fn get_quorum_adversary_threshold( @@ -440,9 +404,7 @@ impl Verifier { .await .map_err(|_| VerificationError::ServiceManagerError)?; - let percentages = self - .decode_bytes(res.0.to_vec()) - .map_err(|_| VerificationError::ServiceManagerError)?; + let percentages = self.decode_bytes(res.0.to_vec())?; if percentages.len() > quorum_number as usize { return Ok(percentages[quorum_number as usize]); @@ -467,7 +429,6 @@ impl Verifier { .map_err(|_| VerificationError::ServiceManagerError)?; self.decode_bytes(res.0.to_vec()) - .map_err(|_| VerificationError::ServiceManagerError) } /// Verifies that the certificate's blob quorum params are correct diff --git a/core/node/da_clients/src/eigen/verifier_tests.rs b/core/node/da_clients/src/eigen/verifier_tests.rs index 1d25aa082c8c..35c78dd8d9d3 100644 --- a/core/node/da_clients/src/eigen/verifier_tests.rs +++ b/core/node/da_clients/src/eigen/verifier_tests.rs @@ -2,6 +2,7 @@ mod test { use std::{collections::HashMap, str::FromStr}; + use url::Url; use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult}; use zksync_types::{ url::SensitiveUrl, @@ -23,8 +24,8 @@ mod test { rpc_url: "https://ethereum-holesky-rpc.publicnode.com".to_string(), svc_manager_addr: Address::from_str("0xD4A7E1Bd8015057293f0D0A557088c286942e84b").unwrap(), max_blob_size: 2 * 1024 * 1024, - g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), - g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + g1_url: Url::parse("https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point").unwrap(), + 
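
The `saturating_sub` rewrite of `get_context_block` above keeps the old behavior (depth 0 and depth 1 both resolve to the latest block, depth d >= 1 resolves to `latest - (d - 1)`) while dropping the special case and any chance of underflow. A quick check of the cases:

```rust
// Same arithmetic as get_context_block after the patch.
fn context_block(latest: u64, confirmation_depth: u32) -> u64 {
    latest.saturating_sub(confirmation_depth.saturating_sub(1) as u64)
}

fn main() {
    assert_eq!(context_block(100, 0), 100); // old special case: return latest
    assert_eq!(context_block(100, 1), 100); // latest - (1 - 1)
    assert_eq!(context_block(100, 5), 96);  // latest - (5 - 1)
    assert_eq!(context_block(2, 5), 0);     // near genesis: clamps instead of underflowing
}
```

Likewise, `decode_bytes` now delegates to `ethabi::decode` instead of hand-parsing the ABI layout of a dynamic `bytes` value (a 32-byte offset word, then a 32-byte length word at that offset, then the padded payload). A round-trip sketch of what the library takes over, with a toy string error standing in for `VerificationError`:

```rust
use ethabi::{decode, encode, ParamType, Token};

fn decode_bytes(encoded: &[u8]) -> Result<Vec<u8>, String> {
    let tokens = decode(&[ParamType::Bytes], encoded).map_err(|e| e.to_string())?;
    match tokens.into_iter().next() {
        Some(Token::Bytes(data)) => Ok(data),
        _ => Err("expected a single `bytes` value".to_string()),
    }
}

fn main() {
    // Offset word + length word + one padded data word = 96 bytes.
    let encoded = encode(&[Token::Bytes(vec![1, 2, 3])]);
    assert_eq!(encoded.len(), 96);
    assert_eq!(decode_bytes(&encoded).unwrap(), vec![1, 2, 3]);

    // Truncated input is rejected by the library, replacing the manual
    // offset/length bounds checks the old parser carried.
    assert!(decode_bytes(&encoded[..40]).is_err());
}
```
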
g2_url: Url::parse("https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2").unwrap(), settlement_layer_confirmation_depth: 0, private_key: "0xd08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6" .to_string(), diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs index e24ff7ef65b4..515c2bb3d834 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -74,4 +74,8 @@ impl GetBlobData for GetBlobFromDB { .await?; Ok(batch.map(|b| b.pubdata)) } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } }
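
Two closing notes on the smaller changes in this series. Typing `g1_url`/`g2_url` as `url::Url` moves validation to configuration time: a malformed endpoint now fails when the config is built rather than when `save_point` first fetches it. (The URL below is a placeholder, not the real point file.)

```rust
use url::Url;

fn main() {
    // Well-formed endpoints parse...
    assert!(Url::parse("https://example.com/resources/g1.point").is_ok());
    // ...while garbage is rejected up front instead of inside save_point.
    assert!(Url::parse("not a url").is_err());
}
```

And the one-line patch 2 (`&input` to `input`) is clippy's `needless_borrow` lint: `input` is already a `&str`, so the extra borrow only creates a `&&str` for auto-deref to peel back. A toy signature, not the real DAL method:

```rust
fn get_blob_data_by_blob_id(blob_id: &str) -> usize {
    blob_id.len()
}

fn main() {
    let input: &str = "blob-1";
    // Both compile thanks to auto-deref; clippy flags the first form.
    assert_eq!(
        get_blob_data_by_blob_id(&input),
        get_blob_data_by_blob_id(input)
    );
}
```
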