diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index c3898055f848..ddf856a98e26 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "25.2.0", + "core": "25.3.0", "prover": "17.1.1", "zkstack_cli": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index fae21fd42b39..3975539b0de1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12784,7 +12784,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "25.2.0" +version = "25.3.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 0ca0a3be025a..acdd2fefb1ab 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,27 @@ # Changelog +## [25.3.0](https://github.com/matter-labs/zksync-era/compare/core-v25.2.0...core-v25.3.0) (2024-12-11) + + +### Features + +* change seal criteria for gateway ([#3320](https://github.com/matter-labs/zksync-era/issues/3320)) ([a0a74aa](https://github.com/matter-labs/zksync-era/commit/a0a74aaeb42f076d20c4ae8a32925eff2de11d0c)) +* **contract-verifier:** Download compilers from GH automatically ([#3291](https://github.com/matter-labs/zksync-era/issues/3291)) ([a10c4ba](https://github.com/matter-labs/zksync-era/commit/a10c4baa312f26ebac2a10115fb7bd314d18b9c1)) +* integrate gateway changes for some components ([#3274](https://github.com/matter-labs/zksync-era/issues/3274)) ([cbc91e3](https://github.com/matter-labs/zksync-era/commit/cbc91e35f84d04f2e4c8e81028596db009e478d1)) +* **proof-data-handler:** exclude batches without object file in GCS ([#2980](https://github.com/matter-labs/zksync-era/issues/2980)) ([3e309e0](https://github.com/matter-labs/zksync-era/commit/3e309e06b24649c74bfe120e8ca45247cb2b5628)) +* **pruning:** Record L1 batch root hash in pruning logs ([#3266](https://github.com/matter-labs/zksync-era/issues/3266)) ([7b6e590](https://github.com/matter-labs/zksync-era/commit/7b6e59083cf0cafeaef5dd4b2dd39257ff91316d)) +* **state-keeper:** mempool io opens batch if there is protocol upgrade tx ([#3360](https://github.com/matter-labs/zksync-era/issues/3360)) ([f6422cd](https://github.com/matter-labs/zksync-era/commit/f6422cd59dab2c105bb7c125c172f2621fe39464)) +* **tee:** add error handling for unstable_getTeeProofs API endpoint ([#3321](https://github.com/matter-labs/zksync-era/issues/3321)) ([26f630c](https://github.com/matter-labs/zksync-era/commit/26f630cb75958c711d67d13bc77ddbb1117156c3)) +* **zksync_cli:** Health checkpoint improvements ([#3193](https://github.com/matter-labs/zksync-era/issues/3193)) ([440fe8d](https://github.com/matter-labs/zksync-era/commit/440fe8d8afdf0fc2768692a1b40b0910873e2faf)) + + +### Bug Fixes + +* **api:** batch fee input scaling for `debug_traceCall` ([#3344](https://github.com/matter-labs/zksync-era/issues/3344)) ([7ace594](https://github.com/matter-labs/zksync-era/commit/7ace594fb3140212bd94ffd6bffcac99805cf4b1)) +* **tee:** correct previous fix for race condition in batch locking ([#3358](https://github.com/matter-labs/zksync-era/issues/3358)) ([b12da8d](https://github.com/matter-labs/zksync-era/commit/b12da8d1fddc7870bf17d5e08312d20773815269)) +* **tee:** fix race condition in batch locking ([#3342](https://github.com/matter-labs/zksync-era/issues/3342)) ([a7dc0ed](https://github.com/matter-labs/zksync-era/commit/a7dc0ed5007f6b2f789f4c61cb3d137843151860)) +* **tracer:** adds vm error to flatCallTracer error field if exists ([#3374](https://github.com/matter-labs/zksync-era/issues/3374)) 
([5d77727](https://github.com/matter-labs/zksync-era/commit/5d77727cd3ba5f4d84643fee1873f03656310b4d)) + ## [25.2.0](https://github.com/matter-labs/zksync-era/compare/core-v25.1.0...core-v25.2.0) (2024-11-19) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 91bdcefa2ec0..f56af827bc45 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "25.2.0" # x-release-please-version +version = "25.3.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index d74928e8fbc7..547b18460390 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -525,6 +525,7 @@ impl MainNodeBuilder { }; let secrets = try_load_config!(self.secrets.data_availability); + let l1_secrets = try_load_config!(self.secrets.l1); match (da_client_config, secrets) { (DAClientConfig::Avail(config), DataAvailabilitySecrets::Avail(secret)) => { self.node.add_layer(AvailWiringLayer::new(config, secret)); @@ -535,7 +536,10 @@ impl MainNodeBuilder { .add_layer(CelestiaWiringLayer::new(config, secret)); } - (DAClientConfig::Eigen(config), DataAvailabilitySecrets::Eigen(secret)) => { + (DAClientConfig::Eigen(mut config), DataAvailabilitySecrets::Eigen(secret)) => { + if config.eigenda_eth_rpc.is_none() { + config.eigenda_eth_rpc = Some(l1_secrets.l1_rpc_url.expose_str().to_string()); + } self.node.add_layer(EigenWiringLayer::new(config, secret)); } diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs index 68b1390d980a..b7723e2271a6 100644 --- a/core/lib/config/src/configs/da_client/eigen.rs +++ b/core/lib/config/src/configs/da_client/eigen.rs @@ -1,15 +1,15 @@ use serde::Deserialize; use zksync_basic_types::secrets::PrivateKey; /// Configuration for the EigenDA remote disperser client. 
-#[derive(Clone, Debug, PartialEq, Deserialize, Default)] +#[derive(Clone, Debug, PartialEq, Deserialize)] pub struct EigenConfig { /// URL of the Disperser RPC server pub disperser_rpc: String, /// Block height needed to reach in order to consider the blob finalized /// a value less or equal to 0 means that the disperser will not wait for finalization - pub settlement_layer_confirmation_depth: i32, + pub settlement_layer_confirmation_depth: u32, /// URL of the Ethereum RPC server - pub eigenda_eth_rpc: String, + pub eigenda_eth_rpc: Option, /// Address of the service manager contract pub eigenda_svc_manager_address: String, /// Wait for the blob to be finalized before returning the response @@ -24,6 +24,22 @@ pub struct EigenConfig { pub chain_id: u64, } +impl Default for EigenConfig { + fn default() -> Self { + Self { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + wait_for_finalization: false, + authenticated: false, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + chain_id: 19000, + } + } +} + #[derive(Clone, Debug, PartialEq)] pub struct EigenSecrets { pub private_key: PrivateKey, diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index c8bf1b3b8995..e9ad6bd3c074 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -6,7 +6,6 @@ pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100; pub const DEFAULT_MAX_RETRIES: u16 = 5; pub const DEFAULT_USE_DUMMY_INCLUSION_DATA: bool = false; -pub const DEFAULT_MAX_CONCURRENT_REQUESTS: u32 = 100; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { @@ -20,8 +19,6 @@ pub struct DADispatcherConfig { // TODO: run a verification task to check if the L1 contract expects the inclusion proofs to // avoid the scenario where contracts expect real proofs, and server is using dummy proofs. pub use_dummy_inclusion_data: Option, - /// The maximun number of concurrent request to send to the DA server. 
- pub max_concurrent_requests: Option, } impl DADispatcherConfig { @@ -31,7 +28,6 @@ impl DADispatcherConfig { max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH), max_retries: Some(DEFAULT_MAX_RETRIES), use_dummy_inclusion_data: Some(DEFAULT_USE_DUMMY_INCLUSION_DATA), - max_concurrent_requests: Some(DEFAULT_MAX_CONCURRENT_REQUESTS), } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 5464d82d1ef2..1a3f63d9b278 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -972,7 +972,6 @@ impl Distribution for EncodeDist { max_rows_to_dispatch: self.sample(rng), max_retries: self.sample(rng), use_dummy_inclusion_data: self.sample(rng), - max_concurrent_requests: self.sample(rng), } } } diff --git a/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql new file mode 100644 index 000000000000..938d2e09de44 --- /dev/null +++ b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql @@ -0,0 +1 @@ +CREATE INDEX idx_blob_id_l1_batch_number ON data_availability (blob_id, l1_batch_number); diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 3815d65f20b4..3a2cd1ecdd62 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -175,45 +175,6 @@ impl DataAvailabilityDal<'_, '_> { .map(DataAvailabilityBlob::from)) } - pub async fn get_da_blob_ids_awaiting_inclusion( - &mut self, - ) -> DalResult>> { - let rows = sqlx::query!( - r#" - SELECT - l1_batch_number, - blob_id, - inclusion_data, - sent_at - FROM - data_availability - WHERE - inclusion_data IS NULL - ORDER BY - l1_batch_number - "#, - ) - .instrument("get_da_blobs_awaiting_inclusion") - .fetch_all(self.storage) - .await?; - - Ok(rows - .into_iter() - .map(|row| { - let l1_batch_number_u32 = row.l1_batch_number.try_into(); - if let Ok(l1_batch_number) = l1_batch_number_u32 { - Some(DataAvailabilityBlob { - l1_batch_number: L1BatchNumber(l1_batch_number), - blob_id: row.blob_id, - inclusion_data: row.inclusion_data, - sent_at: row.sent_at.and_utc(), - }) - } else { - None - } - }) - .collect()) - } /// Fetches the pubdata and `l1_batch_number` for the L1 batches that are ready for DA dispatch. 
pub async fn get_ready_for_da_dispatch_l1_batches( &mut self, diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index e867461b3c2a..deda1ce3f5c2 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -1,21 +1,17 @@ use std::env; -use zksync_config::{ - configs::{ - da_client::{ - avail::{ - AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, - AVAIL_GAS_RELAY_CLIENT_NAME, - }, - celestia::CelestiaSecrets, - eigen::EigenSecrets, - DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, - EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, +use zksync_config::configs::{ + da_client::{ + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, }, - secrets::DataAvailabilitySecrets, - AvailConfig, + celestia::CelestiaSecrets, + eigen::EigenSecrets, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, + EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, - EigenConfig, + secrets::DataAvailabilitySecrets, + AvailConfig, }; use crate::{envy_load, FromEnv}; @@ -38,20 +34,7 @@ impl FromEnv for DAClientConfig { }, }), CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?), - EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(EigenConfig { - disperser_rpc: env::var("EIGENDA_DISPERSER_RPC")?, - settlement_layer_confirmation_depth: env::var( - "EIGENDA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH", - )? - .parse()?, - eigenda_eth_rpc: env::var("EIGENDA_EIGENDA_ETH_RPC")?, - eigenda_svc_manager_address: env::var("EIGENDA_EIGENDA_SVC_MANAGER_ADDRESS")?, - wait_for_finalization: env::var("EIGENDA_WAIT_FOR_FINALIZATION")?.parse()?, - authenticated: env::var("EIGENDA_AUTHENTICATED")?.parse()?, - g1_url: env::var("EIGENDA_G1_URL")?.parse()?, - g2_url: env::var("EIGENDA_G2_URL")?.parse()?, - chain_id: env::var("EIGENDA_CHAIN_ID")?.parse()?, - }), + EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(envy_load("da_eigen_config", "DA_")?), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) 
} @@ -265,15 +248,15 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Eigen" - EIGENDA_DISPERSER_RPC="http://localhost:8080" - EIGENDA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH=0 - EIGENDA_EIGENDA_ETH_RPC="http://localhost:8545" - EIGENDA_EIGENDA_SVC_MANAGER_ADDRESS="0x123" - EIGENDA_WAIT_FOR_FINALIZATION=true - EIGENDA_AUTHENTICATED=false - EIGENDA_G1_URL="resources1" - EIGENDA_G2_URL="resources2" - EIGENDA_CHAIN_ID=1 + DA_DISPERSER_RPC="http://localhost:8080" + DA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH=0 + DA_EIGENDA_ETH_RPC="http://localhost:8545" + DA_EIGENDA_SVC_MANAGER_ADDRESS="0x123" + DA_WAIT_FOR_FINALIZATION=true + DA_AUTHENTICATED=false + DA_G1_URL="resources1" + DA_G2_URL="resources2" + DA_CHAIN_ID=1 "#; lock.set_env(config); @@ -283,7 +266,7 @@ mod tests { DAClientConfig::Eigen(EigenConfig { disperser_rpc: "http://localhost:8080".to_string(), settlement_layer_confirmation_depth: 0, - eigenda_eth_rpc: "http://localhost:8545".to_string(), + eigenda_eth_rpc: Some("http://localhost:8545".to_string()), eigenda_svc_manager_address: "0x123".to_string(), wait_for_finalization: true, authenticated: false, diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 805e6b2234b5..246752db91ac 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -21,14 +21,12 @@ mod tests { interval: u32, rows_limit: u32, max_retries: u16, - max_concurrent_requests: u32, ) -> DADispatcherConfig { DADispatcherConfig { polling_interval_ms: Some(interval), max_rows_to_dispatch: Some(rows_limit), max_retries: Some(max_retries), use_dummy_inclusion_data: Some(true), - max_concurrent_requests: Some(max_concurrent_requests), } } @@ -40,10 +38,9 @@ mod tests { DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60 DA_DISPATCHER_MAX_RETRIES=7 DA_DISPATCHER_USE_DUMMY_INCLUSION_DATA="true" - DA_DISPATCHER_MAX_CONCURRENT_REQUESTS=10 "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!(actual, expected_da_layer_config(5000, 60, 7, 10)); + assert_eq!(actual, expected_da_layer_config(5000, 60, 7)); } } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index bd817826de7f..84e94aeae1a2 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -63,9 +63,7 @@ impl ProtoRepr for proto::DataAvailabilityClient { &conf.settlement_layer_confirmation_depth, ) .context("settlement_layer_confirmation_depth")?, - eigenda_eth_rpc: required(&conf.eigenda_eth_rpc) - .context("eigenda_eth_rpc")? - .clone(), + eigenda_eth_rpc: required(&conf.eigenda_eth_rpc).ok().cloned(), eigenda_svc_manager_address: required(&conf.eigenda_svc_manager_address) .context("eigenda_svc_manager_address")? 
.clone(), @@ -117,7 +115,7 @@ impl ProtoRepr for proto::DataAvailabilityClient { settlement_layer_confirmation_depth: Some( config.settlement_layer_confirmation_depth, ), - eigenda_eth_rpc: Some(config.eigenda_eth_rpc.clone()), + eigenda_eth_rpc: config.eigenda_eth_rpc.clone(), eigenda_svc_manager_address: Some(config.eigenda_svc_manager_address.clone()), wait_for_finalization: Some(config.wait_for_finalization), authenticated: Some(config.authenticated), diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index e85ff5ae76ed..d77073bd32cf 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -12,7 +12,6 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| x as u16), use_dummy_inclusion_data: self.use_dummy_inclusion_data, - max_concurrent_requests: self.max_concurrent_requests, }) } @@ -22,7 +21,6 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(Into::into), use_dummy_inclusion_data: this.use_dummy_inclusion_data, - max_concurrent_requests: this.max_concurrent_requests, } } } diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index dd44d0ad14d1..cf4318c520d7 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -38,7 +38,7 @@ message CelestiaConfig { message EigenConfig { optional string disperser_rpc = 3; - optional int32 settlement_layer_confirmation_depth = 4; + optional uint32 settlement_layer_confirmation_depth = 4; optional string eigenda_eth_rpc = 5; optional string eigenda_svc_manager_address = 6; optional bool wait_for_finalization = 7; diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index d6329d14b281..dd366bd5b925 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -7,5 +7,4 @@ message DataAvailabilityDispatcher { optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; optional bool use_dummy_inclusion_data = 4; - optional uint32 max_concurrent_requests = 5; } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index d96c1e659541..8e72f5b45991 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -96,16 +96,22 @@ impl DebugNamespace { CallType::NearCall => unreachable!("We have to filter our near calls before"), }; - let (result, error) = if let Some(error) = call.revert_reason { - (None, Some(error)) - } else { - ( + let (result, error) = match (call.revert_reason, call.error) { + (Some(revert_reason), _) => { + // If revert_reason exists, it takes priority over VM error + (None, Some(revert_reason)) + } + (None, Some(vm_error)) => { + // If no revert_reason but VM error exists + (None, Some(vm_error)) + } + (None, None) => ( Some(CallResult { output: web3::Bytes::from(call.output), gas_used: U256::from(call.gas_used), }), None, - ) + ), }; calls.push(DebugCallFlat { diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index f12042b12576..efc588faa081 100644 --- 
a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -62,7 +62,9 @@ ethabi.workspace = true rust-kzg-bn254.workspace = true ark-bn254.workspace = true num-bigint.workspace = true -serial_test.workspace = true zksync_web3_decl.workspace = true zksync_eth_client.workspace = true url.workspace = true + +[dev-dependencies] +serial_test.workspace = true diff --git a/core/node/da_clients/src/eigen/README.md b/core/node/da_clients/src/eigen/README.md index ae2398088dc0..bf6b5fb8038b 100644 --- a/core/node/da_clients/src/eigen/README.md +++ b/core/node/da_clients/src/eigen/README.md @@ -32,3 +32,6 @@ pub fn compile_protos() { The generated folder is considered a temporary solution until the EigenDA has a library with either a protogen, or preferably a full Rust client implementation. + +proto files are not included here to not create confusion in case they are not updated in time, so the EigenDA +[repo](https://github.com/Layr-Labs/eigenda/tree/master/api/proto) has to be a source of truth for the proto files. diff --git a/core/node/da_clients/src/eigen/blob_info.rs b/core/node/da_clients/src/eigen/blob_info.rs index a44117fd4ed7..63fece177c59 100644 --- a/core/node/da_clients/src/eigen/blob_info.rs +++ b/core/node/da_clients/src/eigen/blob_info.rs @@ -12,13 +12,13 @@ use super::{ #[derive(Debug)] pub enum ConversionError { - NotPresentError, + NotPresent, } impl fmt::Display for ConversionError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - ConversionError::NotPresentError => write!(f, "Failed to convert BlobInfo"), + ConversionError::NotPresent => write!(f, "Failed to convert BlobInfo"), } } } @@ -29,18 +29,6 @@ pub struct G1Commitment { pub y: Vec, } -impl G1Commitment { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - bytes.extend(&self.x.len().to_be_bytes()); - bytes.extend(&self.x); - bytes.extend(&self.y.len().to_be_bytes()); - bytes.extend(&self.y); - - bytes - } -} - impl From for G1Commitment { fn from(value: DisperserG1Commitment) -> Self { Self { @@ -58,18 +46,6 @@ pub struct BlobQuorumParam { pub chunk_length: u32, } -impl BlobQuorumParam { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - bytes.extend(&self.quorum_number.to_be_bytes()); - bytes.extend(&self.adversary_threshold_percentage.to_be_bytes()); - bytes.extend(&self.confirmation_threshold_percentage.to_be_bytes()); - bytes.extend(&self.chunk_length.to_be_bytes()); - - bytes - } -} - impl From for BlobQuorumParam { fn from(value: DisperserBlobQuorumParam) -> Self { Self { @@ -88,34 +64,16 @@ pub struct BlobHeader { pub blob_quorum_params: Vec, } -impl BlobHeader { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - bytes.extend(self.commitment.to_bytes()); - bytes.extend(&self.data_length.to_be_bytes()); - bytes.extend(&self.blob_quorum_params.len().to_be_bytes()); - - for quorum in &self.blob_quorum_params { - bytes.extend(quorum.to_bytes()); - } - - bytes - } -} - impl TryFrom for BlobHeader { type Error = ConversionError; fn try_from(value: DisperserBlobHeader) -> Result { - if value.commitment.is_none() { - return Err(ConversionError::NotPresentError); - } let blob_quorum_params: Vec = value .blob_quorum_params .iter() .map(|param| BlobQuorumParam::from(param.clone())) .collect(); Ok(Self { - commitment: G1Commitment::from(value.commitment.unwrap()), + commitment: G1Commitment::from(value.commitment.ok_or(ConversionError::NotPresent)?), data_length: value.data_length, blob_quorum_params, }) @@ -130,21 +88,6 @@ pub struct BatchHeader { pub 
reference_block_number: u32, } -impl BatchHeader { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - bytes.extend(&self.batch_root.len().to_be_bytes()); - bytes.extend(&self.batch_root); - bytes.extend(&self.quorum_numbers.len().to_be_bytes()); - bytes.extend(&self.quorum_numbers); - bytes.extend(&self.quorum_signed_percentages.len().to_be_bytes()); - bytes.extend(&self.quorum_signed_percentages); - bytes.extend(&self.reference_block_number.to_be_bytes()); - - bytes - } -} - impl From for BatchHeader { fn from(value: DisperserBatchHeader) -> Self { Self { @@ -165,25 +108,11 @@ pub struct BatchMetadata { pub batch_header_hash: Vec, } -impl BatchMetadata { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - bytes.extend(self.batch_header.to_bytes()); - bytes.extend(&self.signatory_record_hash); - bytes.extend(&self.confirmation_block_number.to_be_bytes()); - - bytes - } -} - impl TryFrom for BatchMetadata { type Error = ConversionError; fn try_from(value: DisperserBatchMetadata) -> Result { - if value.batch_header.is_none() { - return Err(ConversionError::NotPresentError); - } Ok(Self { - batch_header: BatchHeader::from(value.batch_header.unwrap()), + batch_header: BatchHeader::from(value.batch_header.ok_or(ConversionError::NotPresent)?), signatory_record_hash: value.signatory_record_hash, fee: value.fee, confirmation_block_number: value.confirmation_block_number, @@ -201,31 +130,15 @@ pub struct BlobVerificationProof { pub quorum_indexes: Vec, } -impl BlobVerificationProof { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - bytes.extend(&self.batch_id.to_be_bytes()); - bytes.extend(&self.blob_index.to_be_bytes()); - bytes.extend(self.batch_medatada.to_bytes()); - bytes.extend(&self.inclusion_proof.len().to_be_bytes()); - bytes.extend(&self.inclusion_proof); - bytes.extend(&self.quorum_indexes.len().to_be_bytes()); - bytes.extend(&self.quorum_indexes); - - bytes - } -} - impl TryFrom for BlobVerificationProof { type Error = ConversionError; fn try_from(value: DisperserBlobVerificationProof) -> Result { - if value.batch_metadata.is_none() { - return Err(ConversionError::NotPresentError); - } Ok(Self { batch_id: value.batch_id, blob_index: value.blob_index, - batch_medatada: BatchMetadata::try_from(value.batch_metadata.unwrap())?, + batch_medatada: BatchMetadata::try_from( + value.batch_metadata.ok_or(ConversionError::NotPresent)?, + )?, inclusion_proof: value.inclusion_proof, quorum_indexes: value.quorum_indexes, }) @@ -238,28 +151,17 @@ pub struct BlobInfo { pub blob_verification_proof: BlobVerificationProof, } -impl BlobInfo { - pub fn to_bytes(&self) -> Vec { - let mut bytes = vec![]; - let blob_header_bytes = self.blob_header.to_bytes(); - bytes.extend(blob_header_bytes.len().to_be_bytes()); - bytes.extend(blob_header_bytes); - let blob_verification_proof_bytes = self.blob_verification_proof.to_bytes(); - bytes.extend(blob_verification_proof_bytes); - bytes - } -} - impl TryFrom for BlobInfo { type Error = ConversionError; fn try_from(value: DisperserBlobInfo) -> Result { - if value.blob_header.is_none() || value.blob_verification_proof.is_none() { - return Err(ConversionError::NotPresentError); - } Ok(Self { - blob_header: BlobHeader::try_from(value.blob_header.unwrap())?, + blob_header: BlobHeader::try_from( + value.blob_header.ok_or(ConversionError::NotPresent)?, + )?, blob_verification_proof: BlobVerificationProof::try_from( - value.blob_verification_proof.unwrap(), + value + .blob_verification_proof + .ok_or(ConversionError::NotPresent)?, )?, }) } 
diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs index 430b5bb4c4a7..5baee9475e92 100644 --- a/core/node/da_clients/src/eigen/client.rs +++ b/core/node/da_clients/src/eigen/client.rs @@ -13,21 +13,23 @@ use super::sdk::RawEigenClient; use crate::utils::to_retriable_da_error; #[async_trait] -pub trait GetBlobData: Clone + std::fmt::Debug + Send + Sync { - async fn call(&self, input: &str) -> anyhow::Result>>; +pub trait GetBlobData: std::fmt::Debug + Send + Sync { + async fn get_blob_data(&self, input: &str) -> anyhow::Result>>; + + fn clone_boxed(&self) -> Box; } /// EigenClient is a client for the Eigen DA service. #[derive(Debug, Clone)] -pub struct EigenClient { - pub(crate) client: Arc>, +pub struct EigenClient { + pub(crate) client: Arc, } -impl EigenClient { +impl EigenClient { pub async fn new( config: EigenConfig, secrets: EigenSecrets, - get_blob_data: Box, + get_blob_data: Box, ) -> anyhow::Result { let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret().as_str()) .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?; @@ -40,7 +42,7 @@ impl EigenClient { } #[async_trait] -impl DataAvailabilityClient for EigenClient { +impl DataAvailabilityClient for EigenClient { async fn dispatch_blob( &self, _: u32, // batch number @@ -75,6 +77,6 @@ impl DataAvailabilityClient for EigenClient { } fn blob_size_limit(&self) -> Option { - Some(RawEigenClient::::blob_size_limit()) + Some(RawEigenClient::blob_size_limit()) } } diff --git a/core/node/da_clients/src/eigen/client_tests.rs b/core/node/da_clients/src/eigen/client_tests.rs index e504e4892b61..4fbbd5c36761 100644 --- a/core/node/da_clients/src/eigen/client_tests.rs +++ b/core/node/da_clients/src/eigen/client_tests.rs @@ -17,7 +17,7 @@ mod tests { use crate::eigen::{blob_info::BlobInfo, EigenClient, GetBlobData}; - impl EigenClient { + impl EigenClient { pub async fn get_blob_data( &self, blob_id: BlobInfo, @@ -32,8 +32,8 @@ mod tests { const STATUS_QUERY_TIMEOUT: u64 = 1800000; // 30 minutes const STATUS_QUERY_INTERVAL: u64 = 5; // 5 ms - async fn get_blob_info( - client: &EigenClient, + async fn get_blob_info( + client: &EigenClient, result: &DispatchResponse, ) -> anyhow::Result { let blob_info = (|| async { @@ -59,9 +59,13 @@ mod tests { #[async_trait::async_trait] impl GetBlobData for MockGetBlobData { - async fn call(&self, _input: &'_ str) -> anyhow::Result>> { + async fn get_blob_data(&self, _input: &'_ str) -> anyhow::Result>> { Ok(None) } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } } #[ignore = "depends on external RPC"] @@ -70,8 +74,8 @@ mod tests { async fn test_non_auth_dispersal() { let config = EigenConfig { disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), - settlement_layer_confirmation_depth: -1, - eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), wait_for_finalization: false, authenticated: false, @@ -110,8 +114,8 @@ mod tests { async fn test_auth_dispersal() { let config = EigenConfig { disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), - settlement_layer_confirmation_depth: -1, - eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: 
Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), wait_for_finalization: false, authenticated: true, @@ -155,7 +159,7 @@ mod tests { g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), settlement_layer_confirmation_depth: 0, - eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), chain_id: 17000, }; @@ -191,7 +195,7 @@ mod tests { let config = EigenConfig { disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), settlement_layer_confirmation_depth: 5, - eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), wait_for_finalization: false, authenticated: false, @@ -231,7 +235,7 @@ mod tests { let config = EigenConfig { disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), settlement_layer_confirmation_depth: 5, - eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()), eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), wait_for_finalization: false, authenticated: true, diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs index b00a9a959887..3a3b1202690c 100644 --- a/core/node/da_clients/src/eigen/sdk.rs +++ b/core/node/da_clients/src/eigen/sdk.rs @@ -7,10 +7,11 @@ use tonic::{ transport::{Channel, ClientTlsConfig, Endpoint}, Streaming, }; +use url::Url; use zksync_config::EigenConfig; use zksync_da_client::types::DAError; use zksync_eth_client::clients::PKSigningClient; -use zksync_types::{url::SensitiveUrl, K256PrivateKey, SLChainId, H160}; +use zksync_types::{url::SensitiveUrl, Address, K256PrivateKey, SLChainId}; use zksync_web3_decl::client::{Client, DynClient, L1}; use super::{ @@ -27,39 +28,54 @@ use crate::eigen::{ disperser_client::DisperserClient, AuthenticatedReply, BlobAuthHeader, }, + verifier::VerificationError, }; -#[derive(Debug, Clone)] -pub(crate) struct RawEigenClient { +#[derive(Debug)] +pub(crate) struct RawEigenClient { client: Arc>>, private_key: SecretKey, pub config: EigenConfig, verifier: Verifier, - get_blob_data: Box, + get_blob_data: Box, +} + +impl Clone for RawEigenClient { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + private_key: self.private_key, + config: self.config.clone(), + verifier: self.verifier.clone(), + get_blob_data: self.get_blob_data.clone_boxed(), + } + } } pub(crate) const DATA_CHUNK_SIZE: usize = 32; -impl RawEigenClient { +impl RawEigenClient { const BLOB_SIZE_LIMIT: usize = 1024 * 1024 * 2; // 2 MB pub async fn new( private_key: SecretKey, config: EigenConfig, - get_blob_data: Box, + get_blob_data: Box, ) -> anyhow::Result { let endpoint = Endpoint::from_str(config.disperser_rpc.as_str())?.tls_config(ClientTlsConfig::new())?; let client = Arc::new(Mutex::new(DisperserClient::connect(endpoint).await?)); let verifier_config 
= VerifierConfig { - rpc_url: config.eigenda_eth_rpc.clone(), - svc_manager_addr: config.eigenda_svc_manager_address.clone(), + rpc_url: config + .eigenda_eth_rpc + .clone() + .ok_or(anyhow::anyhow!("EigenDA ETH RPC not set"))?, + svc_manager_addr: Address::from_str(&config.eigenda_svc_manager_address)?, max_blob_size: Self::BLOB_SIZE_LIMIT as u32, - g1_url: config.g1_url.clone(), - g2_url: config.g2_url.clone(), - settlement_layer_confirmation_depth: config.settlement_layer_confirmation_depth.max(0) - as u32, + g1_url: Url::parse(&config.g1_url)?, + g2_url: Url::parse(&config.g2_url)?, + settlement_layer_confirmation_depth: config.settlement_layer_confirmation_depth, private_key: hex::encode(private_key.secret_bytes()), chain_id: config.chain_id, }; @@ -71,7 +87,7 @@ impl RawEigenClient { K256PrivateKey::from_bytes(zksync_types::H256::from_str( &verifier_config.private_key, )?)?, - H160::from_str(&verifier_config.svc_manager_addr)?, + verifier_config.svc_manager_addr, Verifier::DEFAULT_PRIORITY_FEE_PER_GAS, SLChainId(verifier_config.chain_id), query_client, @@ -109,7 +125,16 @@ impl RawEigenClient { .await? .into_inner(); - Ok(hex::encode(disperse_reply.request_id)) + match disperser::BlobStatus::try_from(disperse_reply.result)? { + disperser::BlobStatus::Failed + | disperser::BlobStatus::InsufficientSignatures + | disperser::BlobStatus::Unknown => Err(anyhow::anyhow!("Blob dispatch failed")), + + disperser::BlobStatus::Dispersing + | disperser::BlobStatus::Processing + | disperser::BlobStatus::Finalized + | disperser::BlobStatus::Confirmed => Ok(hex::encode(disperse_reply.request_id)), + } } async fn dispatch_blob_authenticated(&self, data: Vec) -> anyhow::Result { @@ -147,11 +172,21 @@ impl RawEigenClient { let disperser::authenticated_reply::Payload::DisperseReply(disperse_reply) = reply else { return Err(anyhow::anyhow!("Unexpected response from server")); }; - Ok(hex::encode(disperse_reply.request_id)) + + match disperser::BlobStatus::try_from(disperse_reply.result)? { + disperser::BlobStatus::Failed + | disperser::BlobStatus::InsufficientSignatures + | disperser::BlobStatus::Unknown => Err(anyhow::anyhow!("Blob dispatch failed")), + + disperser::BlobStatus::Dispersing + | disperser::BlobStatus::Processing + | disperser::BlobStatus::Finalized + | disperser::BlobStatus::Confirmed => Ok(hex::encode(disperse_reply.request_id)), + } } - pub async fn get_commitment(&self, blob_id: &str) -> anyhow::Result> { - let blob_info = self.try_get_inclusion_data(blob_id.to_string()).await?; + pub async fn get_commitment(&self, request_id: &str) -> anyhow::Result> { + let blob_info = self.try_get_inclusion_data(request_id.to_string()).await?; let Some(blob_info) = blob_info else { return Ok(None); @@ -162,7 +197,7 @@ impl RawEigenClient { let Some(data) = self.get_blob_data(blob_info.clone()).await? 
else { return Err(anyhow::anyhow!("Failed to get blob data")); }; - let data_db = self.get_blob_data.call(blob_id).await?; + let data_db = self.get_blob_data.get_blob_data(request_id).await?; if let Some(data_db) = data_db { if data_db != data { return Err(anyhow::anyhow!( @@ -179,16 +214,19 @@ impl RawEigenClient { .verify_inclusion_data_against_settlement_layer(blob_info.clone()) .await; // in case of an error, the dispatcher will retry, so the need to return None - if result.is_err() { - return Ok(None); + if let Err(e) = result { + match e { + VerificationError::EmptyHash => return Ok(None), + _ => return Err(anyhow::anyhow!("Failed to verify inclusion data: {:?}", e)), + } } - tracing::info!("Blob dispatch confirmed, blob id: {}", blob_id); + tracing::info!("Blob dispatch confirmed, request id: {}", request_id); Ok(Some(blob_info)) } - pub async fn get_inclusion_data(&self, blob_id: &str) -> anyhow::Result>> { - let blob_info = self.get_commitment(blob_id).await?; + pub async fn get_inclusion_data(&self, request_id: &str) -> anyhow::Result>> { + let blob_info = self.get_commitment(request_id).await?; if let Some(blob_info) = blob_info { Ok(Some(blob_info.blob_verification_proof.inclusion_proof)) } else { diff --git a/core/node/da_clients/src/eigen/verifier.rs b/core/node/da_clients/src/eigen/verifier.rs index 6acd5398d220..b8f774c33a98 100644 --- a/core/node/da_clients/src/eigen/verifier.rs +++ b/core/node/da_clients/src/eigen/verifier.rs @@ -1,15 +1,15 @@ -use std::{collections::HashMap, fs::File, io::copy, path::Path, str::FromStr}; +use std::{collections::HashMap, path::Path}; use ark_bn254::{Fq, G1Affine}; use ethabi::{encode, ParamType, Token}; use rust_kzg_bn254::{blob::Blob, kzg::Kzg, polynomial::PolynomialFormat}; -use tiny_keccak::{Hasher, Keccak}; +use tokio::{fs::File, io::AsyncWriteExt}; use url::Url; use zksync_basic_types::web3::CallRequest; use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult}; use zksync_types::{ web3::{self, BlockId, BlockNumber}, - H160, U256, U64, + Address, U256, U64, }; use super::blob_info::{BatchHeader, BlobHeader, BlobInfo, G1Commitment}; @@ -68,10 +68,10 @@ pub enum VerificationError { #[derive(Debug, Clone)] pub struct VerifierConfig { pub rpc_url: String, - pub svc_manager_addr: String, + pub svc_manager_addr: Address, pub max_blob_size: u32, - pub g1_url: String, - pub g2_url: String, + pub g1_url: Url, + pub g2_url: Url, pub settlement_layer_confirmation_depth: u32, pub private_key: String, pub chain_id: u64, @@ -100,9 +100,11 @@ impl Clone for Verifier { impl Verifier { pub const DEFAULT_PRIORITY_FEE_PER_GAS: u64 = 100; pub const SRSORDER: u32 = 268435456; // 2 ^ 28 + pub const G1POINT: &'static str = "g1.point"; + pub const G2POINT: &'static str = "g2.point.powerOf2"; + pub const POINT_SIZE: u32 = 32; - async fn save_point(url: String, point: String) -> Result<(), VerificationError> { - let url = Url::parse(&url).map_err(|_| VerificationError::LinkError)?; + async fn save_point(url: Url, point: String) -> Result<(), VerificationError> { let response = reqwest::get(url) .await .map_err(|_| VerificationError::LinkError)?; @@ -111,17 +113,21 @@ impl Verifier { } let path = format!("./{}", point); let path = Path::new(&path); - let mut file = File::create(path).map_err(|_| VerificationError::LinkError)?; + let mut file = File::create(path) + .await + .map_err(|_| VerificationError::LinkError)?; let content = response .bytes() .await .map_err(|_| VerificationError::LinkError)?; - copy(&mut content.as_ref(), &mut 
file).map_err(|_| VerificationError::LinkError)?; + file.write_all(&content) + .await + .map_err(|_| VerificationError::LinkError)?; Ok(()) } - async fn save_points(url_g1: String, url_g2: String) -> Result { - Self::save_point(url_g1.clone(), "g1.point".to_string()).await?; - Self::save_point(url_g2.clone(), "g2.point.powerOf2".to_string()).await?; + async fn save_points(url_g1: Url, url_g2: Url) -> Result { + Self::save_point(url_g1, Self::G1POINT.to_string()).await?; + Self::save_point(url_g2, Self::G2POINT.to_string()).await?; Ok(".".to_string()) } @@ -129,20 +135,28 @@ impl Verifier { cfg: VerifierConfig, signing_client: T, ) -> Result { - let srs_points_to_load = cfg.max_blob_size / 32; + let srs_points_to_load = cfg.max_blob_size / Self::POINT_SIZE; let path = Self::save_points(cfg.clone().g1_url, cfg.clone().g2_url).await?; - let kzg = Kzg::setup( - &format!("{}{}", path, "/g1.point"), - "", - &format!("{}{}", path, "/g2.point.powerOf2"), - Self::SRSORDER, - srs_points_to_load, - "".to_string(), - ); - let kzg = kzg.map_err(|e| { - tracing::error!("Failed to setup KZG: {:?}", e); - VerificationError::KzgError - })?; + let kzg_handle = tokio::task::spawn_blocking(move || { + Kzg::setup( + &format!("{}/{}", path, Self::G1POINT), + "", + &format!("{}/{}", path, Self::G2POINT), + Self::SRSORDER, + srs_points_to_load, + "".to_string(), + ) + }); + let kzg = kzg_handle + .await + .map_err(|e| { + tracing::error!("Failed to setup KZG: {:?}", e); + VerificationError::KzgError + })? + .map_err(|e| { + tracing::error!("Failed to setup KZG: {:?}", e); + VerificationError::KzgError + })?; Ok(Self { kzg, @@ -182,9 +196,9 @@ impl Verifier { Ok(()) } - pub fn hash_encode_blob_header(&self, blob_header: BlobHeader) -> Vec { + pub fn hash_encode_blob_header(&self, blob_header: &BlobHeader) -> Vec { let mut blob_quorums = vec![]; - for quorum in blob_header.blob_quorum_params { + for quorum in &blob_header.blob_quorum_params { let quorum = Token::Tuple(vec![ Token::Uint(ethabi::Uint::from(quorum.quorum_number)), Token::Uint(ethabi::Uint::from(quorum.adversary_threshold_percentage)), @@ -203,12 +217,7 @@ impl Verifier { ]); let encoded = encode(&[blob_header]); - - let mut keccak = Keccak::v256(); - keccak.update(&encoded); - let mut hash = [0u8; 32]; - keccak.finalize(&mut hash); - hash.to_vec() + web3::keccak256(&encoded).to_vec() } pub fn process_inclusion_proof( @@ -223,23 +232,15 @@ impl Verifier { } let mut computed_hash = leaf.to_vec(); for i in 0..proof.len() / 32 { - let mut combined = proof[i * 32..(i + 1) * 32] - .iter() - .chain(computed_hash.iter()) - .cloned() - .collect::>(); + let mut buffer = [0u8; 64]; if index % 2 == 0 { - combined = computed_hash - .iter() - .chain(proof[i * 32..(i + 1) * 32].iter()) - .cloned() - .collect::>(); - }; - let mut keccak = Keccak::v256(); - keccak.update(&combined); - let mut hash = [0u8; 32]; - keccak.finalize(&mut hash); - computed_hash = hash.to_vec(); + buffer[..32].copy_from_slice(&computed_hash); + buffer[32..].copy_from_slice(&proof[i * 32..(i + 1) * 32]); + } else { + buffer[..32].copy_from_slice(&proof[i * 32..(i + 1) * 32]); + buffer[32..].copy_from_slice(&computed_hash); + } + computed_hash = web3::keccak256(&buffer).to_vec(); index /= 2; } @@ -247,26 +248,23 @@ impl Verifier { } /// Verifies the certificate's batch root - pub fn verify_merkle_proof(&self, cert: BlobInfo) -> Result<(), VerificationError> { - let inclusion_proof = cert.blob_verification_proof.inclusion_proof; - let root = cert + pub fn verify_merkle_proof(&self, cert: 
&BlobInfo) -> Result<(), VerificationError> { + let inclusion_proof = &cert.blob_verification_proof.inclusion_proof; + let root = &cert .blob_verification_proof .batch_medatada .batch_header .batch_root; let blob_index = cert.blob_verification_proof.blob_index; - let blob_header = cert.blob_header; + let blob_header = &cert.blob_header; let blob_header_hash = self.hash_encode_blob_header(blob_header); - let mut keccak = Keccak::v256(); - keccak.update(&blob_header_hash); - let mut leaf_hash = [0u8; 32]; - keccak.finalize(&mut leaf_hash); + let leaf_hash = web3::keccak256(&blob_header_hash).to_vec(); let generated_root = - self.process_inclusion_proof(&inclusion_proof, &leaf_hash, blob_index)?; + self.process_inclusion_proof(inclusion_proof, &leaf_hash, blob_index)?; - if generated_root != root { + if generated_root != *root { return Err(VerificationError::DifferentRoots); } Ok(()) @@ -274,39 +272,29 @@ impl Verifier { fn hash_batch_metadata( &self, - batch_header: BatchHeader, - signatory_record_hash: Vec, + batch_header: &BatchHeader, + signatory_record_hash: &[u8], confirmation_block_number: u32, ) -> Vec { let batch_header_token = Token::Tuple(vec![ - Token::FixedBytes(batch_header.batch_root), - Token::Bytes(batch_header.quorum_numbers), - Token::Bytes(batch_header.quorum_signed_percentages), + Token::FixedBytes(batch_header.batch_root.clone()), // Clone only where necessary + Token::Bytes(batch_header.quorum_numbers.clone()), + Token::Bytes(batch_header.quorum_signed_percentages.clone()), Token::Uint(ethabi::Uint::from(batch_header.reference_block_number)), ]); let encoded = encode(&[batch_header_token]); - - let mut keccak = Keccak::v256(); - keccak.update(&encoded); - let mut header_hash = [0u8; 32]; - keccak.finalize(&mut header_hash); + let header_hash = web3::keccak256(&encoded).to_vec(); let hash_token = Token::Tuple(vec![ Token::FixedBytes(header_hash.to_vec()), - Token::FixedBytes(signatory_record_hash), + Token::FixedBytes(signatory_record_hash.to_owned()), // Clone only if required ]); let mut hash_encoded = encode(&[hash_token]); hash_encoded.append(&mut confirmation_block_number.to_be_bytes().to_vec()); - - let mut keccak = Keccak::v256(); - keccak.update(&hash_encoded); - let mut hash = [0u8; 32]; - keccak.finalize(&mut hash); - - hash.to_vec() + web3::keccak256(&hash_encoded).to_vec() } /// Retrieves the block to make the request to the service manager @@ -319,15 +307,17 @@ impl Verifier { .map_err(|_| VerificationError::ServiceManagerError)? 
.as_u64(); - if self.cfg.settlement_layer_confirmation_depth == 0 { - return Ok(latest); - } - Ok(latest - (self.cfg.settlement_layer_confirmation_depth as u64 - 1)) + let depth = self + .cfg + .settlement_layer_confirmation_depth + .saturating_sub(1); + let block_to_return = latest.saturating_sub(depth as u64); + Ok(block_to_return) } async fn call_batch_id_to_metadata_hash( &self, - blob_info: BlobInfo, + blob_info: &BlobInfo, ) -> Result, VerificationError> { let context_block = self.get_context_block().await?; @@ -339,10 +329,7 @@ impl Verifier { data.append(batch_id_vec.to_vec().as_mut()); let call_request = CallRequest { - to: Some( - H160::from_str(&self.cfg.svc_manager_addr) - .map_err(|_| VerificationError::ServiceManagerError)?, - ), + to: Some(self.cfg.svc_manager_addr), data: Some(zksync_basic_types::web3::Bytes(data)), ..Default::default() }; @@ -361,21 +348,19 @@ impl Verifier { } /// Verifies the certificate batch hash - pub async fn verify_batch(&self, blob_info: BlobInfo) -> Result<(), VerificationError> { - let expected_hash = self - .call_batch_id_to_metadata_hash(blob_info.clone()) - .await?; + pub async fn verify_batch(&self, blob_info: &BlobInfo) -> Result<(), VerificationError> { + let expected_hash = self.call_batch_id_to_metadata_hash(blob_info).await?; if expected_hash == vec![0u8; 32] { return Err(VerificationError::EmptyHash); } let actual_hash = self.hash_batch_metadata( - blob_info + &blob_info .blob_verification_proof .batch_medatada .batch_header, - blob_info + &blob_info .blob_verification_proof .batch_medatada .signatory_record_hash, @@ -391,47 +376,17 @@ impl Verifier { Ok(()) } - fn decode_bytes(&self, encoded: Vec) -> Result, String> { - // Ensure the input has at least 64 bytes (offset + length) - if encoded.len() < 64 { - return Err("Encoded data is too short".to_string()); - } - - // Read the offset (first 32 bytes) - let offset = { - let mut offset_bytes = [0u8; 32]; - offset_bytes.copy_from_slice(&encoded[0..32]); - usize::from_be_bytes( - offset_bytes[24..32] - .try_into() - .map_err(|_| "Offset is too large")?, - ) - }; - - // Check if offset is valid - if offset + 32 > encoded.len() { - return Err("Offset points outside the encoded data".to_string()); - } - - // Read the length (32 bytes at the offset position) - let length = { - let mut length_bytes = [0u8; 32]; - length_bytes.copy_from_slice(&encoded[offset..offset + 32]); - usize::from_be_bytes( - length_bytes[24..32] - .try_into() - .map_err(|_| "Offset is too large")?, - ) - }; - - // Check if the length is valid - if offset + 32 + length > encoded.len() { - return Err("Length extends beyond the encoded data".to_string()); + fn decode_bytes(&self, encoded: Vec) -> Result, VerificationError> { + let output_type = [ParamType::Bytes]; + let tokens: Vec = ethabi::decode(&output_type, &encoded) + .map_err(|_| VerificationError::ServiceManagerError)?; + let token = tokens + .first() + .ok_or(VerificationError::ServiceManagerError)?; + match token { + Token::Bytes(data) => Ok(data.to_vec()), + _ => Err(VerificationError::ServiceManagerError), } - - // Extract the bytes data - let data = encoded[offset + 32..offset + 32 + length].to_vec(); - Ok(data) } async fn get_quorum_adversary_threshold( @@ -442,10 +397,7 @@ impl Verifier { let data = func_selector.to_vec(); let call_request = CallRequest { - to: Some( - H160::from_str(&self.cfg.svc_manager_addr) - .map_err(|_| VerificationError::ServiceManagerError)?, - ), + to: Some(self.cfg.svc_manager_addr), data: 
Some(zksync_basic_types::web3::Bytes(data)), ..Default::default() }; @@ -457,9 +409,7 @@ impl Verifier { .await .map_err(|_| VerificationError::ServiceManagerError)?; - let percentages = self - .decode_bytes(res.0.to_vec()) - .map_err(|_| VerificationError::ServiceManagerError)?; + let percentages = self.decode_bytes(res.0.to_vec())?; if percentages.len() > quorum_number as usize { return Ok(percentages[quorum_number as usize]); @@ -471,10 +421,7 @@ impl Verifier { let func_selector = ethabi::short_signature("quorumNumbersRequired", &[]); let data = func_selector.to_vec(); let call_request = CallRequest { - to: Some( - H160::from_str(&self.cfg.svc_manager_addr) - .map_err(|_| VerificationError::ServiceManagerError)?, - ), + to: Some(self.cfg.svc_manager_addr), data: Some(zksync_basic_types::web3::Bytes(data)), ..Default::default() }; @@ -487,13 +434,12 @@ impl Verifier { .map_err(|_| VerificationError::ServiceManagerError)?; self.decode_bytes(res.0.to_vec()) - .map_err(|_| VerificationError::ServiceManagerError) } /// Verifies that the certificate's blob quorum params are correct - pub async fn verify_security_params(&self, cert: BlobInfo) -> Result<(), VerificationError> { - let blob_header = cert.blob_header; - let batch_header = cert.blob_verification_proof.batch_medatada.batch_header; + pub async fn verify_security_params(&self, cert: &BlobInfo) -> Result<(), VerificationError> { + let blob_header = &cert.blob_header; + let batch_header = &cert.blob_verification_proof.batch_medatada.batch_header; let mut confirmed_quorums: HashMap = HashMap::new(); for i in 0..blob_header.blob_quorum_params.len() { @@ -542,9 +488,9 @@ impl Verifier { &self, cert: BlobInfo, ) -> Result<(), VerificationError> { - self.verify_batch(cert.clone()).await?; - self.verify_merkle_proof(cert.clone())?; - self.verify_security_params(cert.clone()).await?; + self.verify_batch(&cert).await?; + self.verify_merkle_proof(&cert)?; + self.verify_security_params(&cert).await?; Ok(()) } } diff --git a/core/node/da_clients/src/eigen/verifier_tests.rs b/core/node/da_clients/src/eigen/verifier_tests.rs index bdea8f9a9960..35c78dd8d9d3 100644 --- a/core/node/da_clients/src/eigen/verifier_tests.rs +++ b/core/node/da_clients/src/eigen/verifier_tests.rs @@ -2,11 +2,12 @@ mod test { use std::{collections::HashMap, str::FromStr}; + use url::Url; use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult}; use zksync_types::{ url::SensitiveUrl, web3::{BlockId, Bytes, CallRequest}, - K256PrivateKey, SLChainId, H160, U64, + Address, K256PrivateKey, SLChainId, H160, U64, }; use zksync_web3_decl::client::{Client, DynClient, L1}; @@ -21,10 +22,10 @@ mod test { fn get_verifier_config() -> VerifierConfig { VerifierConfig { rpc_url: "https://ethereum-holesky-rpc.publicnode.com".to_string(), - svc_manager_addr: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + svc_manager_addr: Address::from_str("0xD4A7E1Bd8015057293f0D0A557088c286942e84b").unwrap(), max_blob_size: 2 * 1024 * 1024, - g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), - g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + g1_url: Url::parse("https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point").unwrap(), + g2_url: 
Url::parse("https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2").unwrap(), settlement_layer_confirmation_depth: 0, private_key: "0xd08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6" .to_string(), @@ -82,9 +83,7 @@ mod test { ) .map_err(|_| VerificationError::ServiceManagerError) .unwrap(), - zksync_types::H160::from_str(&cfg.svc_manager_addr) - .map_err(|_| VerificationError::ServiceManagerError) - .unwrap(), + cfg.svc_manager_addr, Verifier::DEFAULT_PRIORITY_FEE_PER_GAS, SLChainId(cfg.chain_id), query_client, @@ -212,7 +211,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_merkle_proof(cert); + let result = verifier.verify_merkle_proof(&cert); assert!(result.is_ok()); } @@ -295,7 +294,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_merkle_proof(cert); + let result = verifier.verify_merkle_proof(&cert); assert!(result.is_ok()); } @@ -332,7 +331,7 @@ mod test { }, ], }; - let result = verifier.hash_encode_blob_header(blob_header); + let result = verifier.hash_encode_blob_header(&blob_header); let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; assert_eq!(result, hex::decode(expected).unwrap()); } @@ -371,7 +370,7 @@ mod test { }, ], }; - let result = verifier.hash_encode_blob_header(blob_header); + let result = verifier.hash_encode_blob_header(&blob_header); let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; assert_eq!(result, hex::decode(expected).unwrap()); } @@ -495,7 +494,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_batch(cert).await; + let result = verifier.verify_batch(&cert).await; assert!(result.is_ok()); } @@ -603,7 +602,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_batch(cert).await; + let result = verifier.verify_batch(&cert).await; assert!(result.is_ok()); } @@ -685,7 +684,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_security_params(cert).await; + let result = verifier.verify_security_params(&cert).await; assert!(result.is_ok()); } @@ -810,7 +809,7 @@ mod test { quorum_indexes: vec![0, 1], }, }; - let result = verifier.verify_security_params(cert).await; + let result = verifier.verify_security_params(&cert).await; assert!(result.is_ok()); } } diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 2f46419f7972..10a895df4956 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -83,7 +83,6 @@ impl DataAvailabilityDispatcher { for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); - METRICS.blobs_pending_dispatch.inc_by(1); let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || { self.client .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) @@ -115,7 +114,6 @@ impl DataAvailabilityDispatcher { .set(batch.l1_batch_number.0 as usize); METRICS.blob_size.observe(batch.pubdata.len()); METRICS.blobs_dispatched.inc_by(1); - METRICS.blobs_pending_dispatch.dec_by(1); tracing::info!( "Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency: {dispatch_latency_duration:?}", diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs index 4c21e556abe1..2f469c865b6c 100644 --- a/core/node/da_dispatcher/src/metrics.rs +++ 
b/core/node/da_dispatcher/src/metrics.rs @@ -19,8 +19,6 @@ pub(super) struct DataAvailabilityDispatcherMetrics { /// Buckets are bytes ranging from 1 KB to 16 MB, which has to satisfy all blob size values. #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0), unit = Unit::Bytes)] pub blob_size: Histogram, - /// Amount of pending blobs to be dispatched. - pub blobs_pending_dispatch: Gauge, /// Total number of blobs dispatched. pub blobs_dispatched: Gauge, /// Total number of blobs included. diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs index 79abf5d0deee..515c2bb3d834 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -47,7 +47,7 @@ impl WiringLayer for EigenWiringLayer { } async fn wire(self, input: Self::Input) -> Result { - let master_pool = input.master_pool.get_custom(2).await?; + let master_pool = input.master_pool.get().await?; let get_blob_from_db = GetBlobFromDB { pool: master_pool }; let client: Box = Box::new( EigenClient::new(self.config, self.secrets, Box::new(get_blob_from_db)).await?, @@ -66,15 +66,16 @@ pub struct GetBlobFromDB { #[async_trait::async_trait] impl GetBlobData for GetBlobFromDB { - async fn call(&self, input: &'_ str) -> anyhow::Result>> { - let pool = self.pool.clone(); - let input = input.to_string(); - let mut conn = pool.connection_tagged("da_dispatcher").await?; + async fn get_blob_data(&self, input: &str) -> anyhow::Result>> { + let mut conn = self.pool.connection_tagged("eigen_client").await?; let batch = conn .data_availability_dal() - .get_blob_data_by_blob_id(&input) + .get_blob_data_by_blob_id(input) .await?; - drop(conn); Ok(batch.map(|b| b.pubdata)) } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } } diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 991ecee699c3..cf354891236b 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -206,6 +206,21 @@ impl StateKeeperIO for MempoolIO { .protocol_version_id_by_timestamp(timestamp) .await .context("Failed loading protocol version")?; + let previous_protocol_version = storage + .blocks_dal() + .pending_protocol_version() + .await + .context("Failed loading previous protocol version")?; + let batch_with_upgrade_tx = if previous_protocol_version != protocol_version { + storage + .protocol_versions_dal() + .get_protocol_upgrade_tx(protocol_version) + .await + .context("Failed loading protocol upgrade tx")? + .is_some() + } else { + false + }; drop(storage); // We create a new filter each time, since parameters may change and a previously @@ -217,7 +232,8 @@ impl StateKeeperIO for MempoolIO { .await .context("failed creating L2 transaction filter")?; - if !self.mempool.has_next(&self.filter) { + // We do not populate mempool with upgrade tx so it should be checked separately. 
+            if !batch_with_upgrade_tx && !self.mempool.has_next(&self.filter) {
                 tokio::time::sleep(self.delay_interval).await;
                 continue;
             }
diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs
index 15164328b7a0..536efe82804a 100644
--- a/core/node/state_keeper/src/io/tests/mod.rs
+++ b/core/node/state_keeper/src/io/tests/mod.rs
@@ -18,6 +18,8 @@ use zksync_types::{
     commitment::{L1BatchCommitmentMode, PubdataParams},
     fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput},
     l2::L2Tx,
+    protocol_upgrade::ProtocolUpgradeTx,
+    protocol_version::ProtocolSemanticVersion,
     AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion,
     ProtocolVersionId, StorageKey, TransactionTimeRangeConstraint, H256, U256,
 };
@@ -848,6 +850,52 @@ async fn test_mempool_with_timestamp_assertion() {
     );
 }

+#[tokio::test]
+async fn test_batch_params_with_protocol_upgrade_tx() {
+    let connection_pool = ConnectionPool::<Core>::constrained_test_pool(2).await;
+    let tester = Tester::new(L1BatchCommitmentMode::Rollup);
+    // Genesis is needed for proper mempool initialization.
+    tester.genesis(&connection_pool).await;
+
+    let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await;
+    let (cursor, _) = mempool.initialize().await.unwrap();
+
+    // Check that new batch params are not returned when there is no tx to process.
+    let new_batch_params = mempool
+        .wait_for_new_batch_params(&cursor, Duration::from_millis(100))
+        .await
+        .unwrap();
+    assert!(new_batch_params.is_none());
+
+    // Insert protocol version with upgrade tx.
+    let protocol_upgrade_tx = ProtocolUpgradeTx {
+        execute: Default::default(),
+        common_data: Default::default(),
+        received_timestamp_ms: 0,
+    };
+    let version = ProtocolVersion {
+        version: ProtocolSemanticVersion {
+            minor: ProtocolVersionId::next(),
+            patch: 0.into(),
+        },
+        tx: Some(protocol_upgrade_tx),
+        ..Default::default()
+    };
+    connection_pool
+        .connection()
+        .await
+        .unwrap()
+        .protocol_versions_dal()
+        .save_protocol_version_with_tx(&version)
+        .await
+        .unwrap();
+    let new_batch_params = mempool
+        .wait_for_new_batch_params(&cursor, Duration::from_millis(100))
+        .await
+        .unwrap();
+    assert!(new_batch_params.is_some());
+}
+
 async fn insert_l2_transaction(storage: &mut Connection<'_, Core>, tx: &L2Tx) {
     storage
         .transactions_dal()
diff --git a/deny.toml b/deny.toml
index d72f3823761f..954017750ab2 100644
--- a/deny.toml
+++ b/deny.toml
@@ -9,7 +9,7 @@ feature-depth = 1
 [advisories]
 ignore = [
     "RUSTSEC-2024-0375", # atty dependency being unmaintained, dependency of clap and criterion, we would need to update to newer major of dependencies
-    "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in consensus, we should consider moving to mach2 fork
+    "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in api server, we should consider moving to mach2 fork
     "RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error`
     # all below caused by StructOpt which we still use and we should move to clap v4 instead
     "RUSTSEC-2021-0145",
@@ -24,7 +24,6 @@ allow = [
     "ISC",
     "Unlicense",
     "MPL-2.0",
-    "Unicode-DFS-2016",
     "CC0-1.0",
     "BSD-2-Clause",
     "BSD-3-Clause",
@@ -33,6 +32,7 @@ allow = [
     "Apache-2.0 WITH LLVM-exception",
     "0BSD",
     "BSL-1.0",
+    "Unicode-3.0"
 ]

 confidence-threshold = 0.8
diff --git a/docs/src/specs/img/verifyinteropmsg.png b/docs/src/specs/img/verifyinteropmsg.png
index c59e5607f7da..031fe811207b 100644
Binary files a/docs/src/specs/img/verifyinteropmsg.png and b/docs/src/specs/img/verifyinteropmsg.png differ
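The `GetBlobFromDB` change in the EigenDA wiring layer above relies on the `clone_boxed` idiom so the blob getter stays usable as a cloneable trait object. The snippet below is a minimal, self-contained sketch of that idiom only, not code from this repository: the trait is simplified to a synchronous, infallible method, and `InMemoryBlobs` is a hypothetical implementor introduced purely for illustration.

```rust
use std::collections::HashMap;
use std::fmt;

/// Simplified stand-in for the trait in the diff above (the real trait is async
/// and returns `anyhow::Result`); kept synchronous to stay dependency-free.
trait GetBlobData: fmt::Debug + Send + Sync {
    fn get_blob_data(&self, input: &str) -> Option<Vec<u8>>;

    /// Object-safe substitute for `Clone`: each implementor clones itself
    /// and hands the copy back behind a fresh box.
    fn clone_boxed(&self) -> Box<dyn GetBlobData>;
}

/// With `clone_boxed` available, the boxed trait object itself becomes cloneable.
impl Clone for Box<dyn GetBlobData> {
    fn clone(&self) -> Self {
        self.as_ref().clone_boxed()
    }
}

/// Hypothetical in-memory implementor used only for this illustration.
#[derive(Debug, Clone)]
struct InMemoryBlobs {
    blobs: HashMap<String, Vec<u8>>,
}

impl GetBlobData for InMemoryBlobs {
    fn get_blob_data(&self, input: &str) -> Option<Vec<u8>> {
        self.blobs.get(input).cloned()
    }

    fn clone_boxed(&self) -> Box<dyn GetBlobData> {
        Box::new(self.clone())
    }
}

fn main() {
    let source: Box<dyn GetBlobData> = Box::new(InMemoryBlobs {
        blobs: HashMap::from([("blob-1".to_string(), vec![1, 2, 3])]),
    });
    // `Clone` itself is not object safe, but the boxed object can still be duplicated.
    let copy = source.clone();
    assert_eq!(copy.get_blob_data("blob-1"), Some(vec![1, 2, 3]));
}
```

This is why the diff can switch the client construction to `Box::new(get_blob_from_db)` while still allowing the wiring layer to hand out independent copies of the getter.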
diff --git a/docs/src/specs/interop/interopmessages.md b/docs/src/specs/interop/interopmessages.md
index cabfd0e56750..f32724e0013d 100644
--- a/docs/src/specs/interop/interopmessages.md
+++ b/docs/src/specs/interop/interopmessages.md
@@ -56,8 +56,9 @@ This `interopHash` serves as a globally unique identifier that can be used on an

 #### How do I get the proof

 You’ll notice that **verifyInteropMessage** has a second argument — a proof that you need to provide. This proof is a
-Merkle tree proof (more details below). You can obtain it by querying the Settlement Layer (Gateway) or generating it
-off-chain by examining the Gateway state on L1.
+Merkle tree proof (more details below). You can obtain it by querying the
+[chain](https://docs.zksync.io/build/api-reference/zks-rpc#zks_getl2tol1msgproof), or by generating it off-chain from
+the chain’s state on L1.

 #### How does the interop message differ from other layers (InteropTransactions, InteropCalls)

@@ -129,49 +130,43 @@ from them at the end of the batch, and sends this tree to the SettlementLayer (G

 ![sendtol1.png](../img/sendtol1.png)

-The Gateway will verify the hashes of the messages to ensure it has received the correct preimages. Once the proof for
-the batch is submitted (or more accurately, during the "execute" step), it will add the root of the Merkle tree to its
-`globalRoot`.
+The settlement layer receives the messages, and once the proof for the batch is submitted (or more accurately, during the
+"execute" step), it will add the root of the Merkle tree to its `messageRoot` (sometimes called `globalRoot`).

 ![globalroot.png](../img/globalroot.png)

-The `globalRoot` is the root of the Merkle tree that includes all messages from all chains. Each chain regularly reads
-the globalRoot value from the Gateway to stay synchronized.
+The `messageRoot` is the root of the Merkle tree that includes all messages from all chains. Each chain regularly reads
+the messageRoot value from the Gateway to stay synchronized.

 ![gateway.png](../img/gateway.png)

 If a user wants to call `verifyInteropMessage` on a chain, they first need to query the Gateway for the Merkle path from
-the batch they are interested in up to the `globalRoot`. Once they have this path, they can provide it as an argument
+the batch they are interested in up to the `messageRoot`. Once they have this path, they can provide it as an argument
 when calling a method on the destination chain (such as the `openSignup` method in our example).

 ![proofmerklepath.png](../img/proofmerklepath.png)

-#### What if the Gateway doesn’t respond
+#### What if the chain doesn’t provide the proof

-If the Gateway doesn’t respond, users can manually re-create the Merkle proof using data available on L1. Every
+If the chain doesn’t respond, users can manually re-create the Merkle proof using data available on L1. Every
 interopMessage is also sent to L1.

-#### Global roots change frequently
+#### Message roots change frequently

-Yes, global roots update continuously as new chains prove their blocks. However, chains retain historical global roots
+Yes, message roots update continuously as new chains prove their blocks. However, chains retain historical message roots
 for a reasonable period (around 24 hours) to ensure that recently generated Merkle paths remain valid.

-#### Is this secure? Could a chain operator, like Chain D, use a different global root
+#### Is this secure? Could a chain operator, like Chain D, use a different message root

-Yes, it’s secure. If a malicious operator on Chain D attempted to use a different global root, they wouldn’t be able to
+Yes, it’s secure. If a malicious operator on Chain D attempted to use a different message root, they wouldn’t be able to
 submit the proof for their new batch to the Gateway. This is because the proof’s public inputs must include the valid
-global root.
-
-#### What if the Gateway is malicious
-
-If the Gateway behaves maliciously, it wouldn’t be able to submit its batches to L1, as the proof would fail
-verification. A separate section will cover interop transaction security in more detail.
+message root.

 ### Other Features

 #### Dependency Set

-- In ElasticChain, this is implicitly handled by the Gateway. Any chain that is part of the global root can exchange
+- In ElasticChain, this is implicitly handled by the Gateway. Any chain that is part of the message root can exchange
   messages with any other chain, effectively forming an undirected graph.

 #### Timestamps and Expiration
diff --git a/docs/src/specs/interop/overview.md b/docs/src/specs/interop/overview.md
index 8ca28723e03a..4b6b7417083d 100644
--- a/docs/src/specs/interop/overview.md
+++ b/docs/src/specs/interop/overview.md
@@ -112,14 +112,14 @@ The step-by-step process and exact details will be covered in the next section.

 ## Technical Details

-### How is Interop Different from a Bridge
+### How does native bridging differ from third-party bridging

 Bridges generally fall into two categories: Native and Third-Party.

 #### 1. Native Bridges

-Native bridges enable asset transfers “up and down” (from L2 to L1 and vice versa). In contrast, interop allows direct
-transfers between different L2s.
+Native bridges enable asset transfers “up and down” (from L2 to L1 and vice versa), but interop (which is also a form of
+native bridging) allows you to move assets directly between different L2s.

 Instead of doing a "round trip" (L2 → L1 → another L2), interop lets you move assets directly between two L2s, saving
 both time and cost.

@@ -129,8 +129,8 @@ both time and cost.
 Third-party bridges enable transfers between two L2s, but they rely on their own liquidity. While you, as the user,
 receive assets on the destination chain instantly, these assets come from the bridge’s liquidity pool.

-Bridge operators then rebalance using native bridging, which requires maintaining token reserves on both sides. This
-adds costs for the bridge operators, often resulting in higher fees for users.
+Bridge operators then rebalance using native bridging, which requires maintaining token reserves on both sides. Without
+interop, this adds costs for the bridge operators, often resulting in higher fees for users.

 The good news is that third-party bridges can use interop to improve their token transfers by utilizing the
 **InteropMessage** layer.
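The interop documentation updated above points readers at the `zks_getL2ToL1MsgProof` RPC for fetching the Merkle path that `verifyInteropMessage` expects. As a rough illustration of what such a query can look like, here is a sketch using `reqwest` and `serde_json`; the endpoint URL, block number, sender address, and message hash are placeholders, and the parameter list is an assumption taken from the linked API reference, so verify it there before relying on this shape.

```rust
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Placeholder RPC endpoint; any chain exposing the `zks_` namespace should work.
    let rpc_url = "https://mainnet.era.zksync.io";

    // JSON-RPC request for the L2->L1 message proof. The assumed parameter order is
    // (block number, sender, keccak256 hash of the message); check the API reference.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "zks_getL2ToL1MsgProof",
        "params": [
            12_345,                                       // hypothetical L2 block containing the message
            "0x0000000000000000000000000000000000001234", // hypothetical sender address
            "0x0000000000000000000000000000000000000000000000000000000000000000", // placeholder keccak256(message)
        ],
    });

    let response: Value = reqwest::Client::new()
        .post(rpc_url)
        .json(&request)
        .send()
        .await?
        .json()
        .await?;

    // When the message exists, the returned proof is the Merkle path to pass to `verifyInteropMessage`.
    println!("{response:#}");
    Ok(())
}
```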