Skip to content

Commit

Permalink
Merge branch 'eigen-client-m0-implementation-merge-extra-features' in…
Browse files Browse the repository at this point in the history
…to eigen-client-m0-extract-client
  • Loading branch information
gianbelinche committed Dec 13, 2024
2 parents 798cf45 + 4af1a20 commit 051c661
Show file tree
Hide file tree
Showing 25 changed files with 159 additions and 138 deletions.
2 changes: 1 addition & 1 deletion .github/release-please/manifest.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
"core": "25.2.0",
"core": "25.3.0",
"prover": "17.1.1",
"zkstack_cli": "0.1.2"
}
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

22 changes: 22 additions & 0 deletions core/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,27 @@
# Changelog

## [25.3.0](https://github.com/matter-labs/zksync-era/compare/core-v25.2.0...core-v25.3.0) (2024-12-11)


### Features

* change seal criteria for gateway ([#3320](https://github.com/matter-labs/zksync-era/issues/3320)) ([a0a74aa](https://github.com/matter-labs/zksync-era/commit/a0a74aaeb42f076d20c4ae8a32925eff2de11d0c))
* **contract-verifier:** Download compilers from GH automatically ([#3291](https://github.com/matter-labs/zksync-era/issues/3291)) ([a10c4ba](https://github.com/matter-labs/zksync-era/commit/a10c4baa312f26ebac2a10115fb7bd314d18b9c1))
* integrate gateway changes for some components ([#3274](https://github.com/matter-labs/zksync-era/issues/3274)) ([cbc91e3](https://github.com/matter-labs/zksync-era/commit/cbc91e35f84d04f2e4c8e81028596db009e478d1))
* **proof-data-handler:** exclude batches without object file in GCS ([#2980](https://github.com/matter-labs/zksync-era/issues/2980)) ([3e309e0](https://github.com/matter-labs/zksync-era/commit/3e309e06b24649c74bfe120e8ca45247cb2b5628))
* **pruning:** Record L1 batch root hash in pruning logs ([#3266](https://github.com/matter-labs/zksync-era/issues/3266)) ([7b6e590](https://github.com/matter-labs/zksync-era/commit/7b6e59083cf0cafeaef5dd4b2dd39257ff91316d))
* **state-keeper:** mempool io opens batch if there is protocol upgrade tx ([#3360](https://github.com/matter-labs/zksync-era/issues/3360)) ([f6422cd](https://github.com/matter-labs/zksync-era/commit/f6422cd59dab2c105bb7c125c172f2621fe39464))
* **tee:** add error handling for unstable_getTeeProofs API endpoint ([#3321](https://github.com/matter-labs/zksync-era/issues/3321)) ([26f630c](https://github.com/matter-labs/zksync-era/commit/26f630cb75958c711d67d13bc77ddbb1117156c3))
* **zksync_cli:** Health checkpoint improvements ([#3193](https://github.com/matter-labs/zksync-era/issues/3193)) ([440fe8d](https://github.com/matter-labs/zksync-era/commit/440fe8d8afdf0fc2768692a1b40b0910873e2faf))


### Bug Fixes

* **api:** batch fee input scaling for `debug_traceCall` ([#3344](https://github.com/matter-labs/zksync-era/issues/3344)) ([7ace594](https://github.com/matter-labs/zksync-era/commit/7ace594fb3140212bd94ffd6bffcac99805cf4b1))
* **tee:** correct previous fix for race condition in batch locking ([#3358](https://github.com/matter-labs/zksync-era/issues/3358)) ([b12da8d](https://github.com/matter-labs/zksync-era/commit/b12da8d1fddc7870bf17d5e08312d20773815269))
* **tee:** fix race condition in batch locking ([#3342](https://github.com/matter-labs/zksync-era/issues/3342)) ([a7dc0ed](https://github.com/matter-labs/zksync-era/commit/a7dc0ed5007f6b2f789f4c61cb3d137843151860))
* **tracer:** adds vm error to flatCallTracer error field if exists ([#3374](https://github.com/matter-labs/zksync-era/issues/3374)) ([5d77727](https://github.com/matter-labs/zksync-era/commit/5d77727cd3ba5f4d84643fee1873f03656310b4d))

## [25.2.0](https://github.com/matter-labs/zksync-era/compare/core-v25.1.0...core-v25.2.0) (2024-11-19)


Expand Down
2 changes: 1 addition & 1 deletion core/bin/external_node/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "zksync_external_node"
description = "Non-validator ZKsync node"
version = "25.2.0" # x-release-please-version
version = "25.3.0" # x-release-please-version
edition.workspace = true
authors.workspace = true
homepage.workspace = true
Expand Down
6 changes: 5 additions & 1 deletion core/bin/zksync_server/src/node_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -525,6 +525,7 @@ impl MainNodeBuilder {
};

let secrets = try_load_config!(self.secrets.data_availability);
let l1_secrets = try_load_config!(self.secrets.l1);
match (da_client_config, secrets) {
(DAClientConfig::Avail(config), DataAvailabilitySecrets::Avail(secret)) => {
self.node.add_layer(AvailWiringLayer::new(config, secret));
Expand All @@ -535,7 +536,10 @@ impl MainNodeBuilder {
.add_layer(CelestiaWiringLayer::new(config, secret));
}

(DAClientConfig::Eigen(config), DataAvailabilitySecrets::Eigen(secret)) => {
(DAClientConfig::Eigen(mut config), DataAvailabilitySecrets::Eigen(secret)) => {
if config.eigenda_eth_rpc.is_none() {
config.eigenda_eth_rpc = Some(l1_secrets.l1_rpc_url.expose_str().to_string());
}
self.node.add_layer(EigenWiringLayer::new(config, secret));
}

Expand Down
2 changes: 1 addition & 1 deletion core/lib/config/src/configs/da_client/eigen.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ pub struct EigenConfig {
/// a value less than or equal to 0 means that the disperser will not wait for finalization
pub settlement_layer_confirmation_depth: i32,
/// URL of the Ethereum RPC server
pub eigenda_eth_rpc: String,
pub eigenda_eth_rpc: Option<String>,
/// Address of the service manager contract
pub eigenda_svc_manager_address: String,
/// Wait for the blob to be finalized before returning the response
Expand Down
4 changes: 0 additions & 4 deletions core/lib/config/src/configs/da_dispatcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000;
pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100;
pub const DEFAULT_MAX_RETRIES: u16 = 5;
pub const DEFAULT_USE_DUMMY_INCLUSION_DATA: bool = false;
pub const DEFAULT_MAX_CONCURRENT_REQUESTS: u32 = 100;

#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct DADispatcherConfig {
Expand All @@ -20,8 +19,6 @@ pub struct DADispatcherConfig {
// TODO: run a verification task to check if the L1 contract expects the inclusion proofs to
// avoid the scenario where contracts expect real proofs, and server is using dummy proofs.
pub use_dummy_inclusion_data: Option<bool>,
/// The maximum number of concurrent requests to send to the DA server.
pub max_concurrent_requests: Option<u32>,
}

impl DADispatcherConfig {
Expand All @@ -31,7 +28,6 @@ impl DADispatcherConfig {
max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH),
max_retries: Some(DEFAULT_MAX_RETRIES),
use_dummy_inclusion_data: Some(DEFAULT_USE_DUMMY_INCLUSION_DATA),
max_concurrent_requests: Some(DEFAULT_MAX_CONCURRENT_REQUESTS),
}
}

Expand Down
1 change: 0 additions & 1 deletion core/lib/config/src/testonly.rs
Original file line number Diff line number Diff line change
Expand Up @@ -972,7 +972,6 @@ impl Distribution<configs::da_dispatcher::DADispatcherConfig> for EncodeDist {
max_rows_to_dispatch: self.sample(rng),
max_retries: self.sample(rng),
use_dummy_inclusion_data: self.sample(rng),
max_concurrent_requests: self.sample(rng),
}
}
}
Expand Down
39 changes: 0 additions & 39 deletions core/lib/dal/src/data_availability_dal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -175,45 +175,6 @@ impl DataAvailabilityDal<'_, '_> {
.map(DataAvailabilityBlob::from))
}

pub async fn get_da_blob_ids_awaiting_inclusion(
&mut self,
) -> DalResult<Vec<Option<DataAvailabilityBlob>>> {
let rows = sqlx::query!(
r#"
SELECT
l1_batch_number,
blob_id,
inclusion_data,
sent_at
FROM
data_availability
WHERE
inclusion_data IS NULL
ORDER BY
l1_batch_number
"#,
)
.instrument("get_da_blobs_awaiting_inclusion")
.fetch_all(self.storage)
.await?;

Ok(rows
.into_iter()
.map(|row| {
let l1_batch_number_u32 = row.l1_batch_number.try_into();
if let Ok(l1_batch_number) = l1_batch_number_u32 {
Some(DataAvailabilityBlob {
l1_batch_number: L1BatchNumber(l1_batch_number),
blob_id: row.blob_id,
inclusion_data: row.inclusion_data,
sent_at: row.sent_at.and_utc(),
})
} else {
None
}
})
.collect())
}
/// Fetches the pubdata and `l1_batch_number` for the L1 batches that are ready for DA dispatch.
pub async fn get_ready_for_da_dispatch_l1_batches(
&mut self,
Expand Down
59 changes: 21 additions & 38 deletions core/lib/env_config/src/da_client.rs
Original file line number Diff line number Diff line change
@@ -1,21 +1,17 @@
use std::env;

use zksync_config::{
configs::{
da_client::{
avail::{
AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME,
AVAIL_GAS_RELAY_CLIENT_NAME,
},
celestia::CelestiaSecrets,
eigen::EigenSecrets,
DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME,
EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME,
use zksync_config::configs::{
da_client::{
avail::{
AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME,
},
secrets::DataAvailabilitySecrets,
AvailConfig,
celestia::CelestiaSecrets,
eigen::EigenSecrets,
DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME,
EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME,
},
EigenConfig,
secrets::DataAvailabilitySecrets,
AvailConfig,
};

use crate::{envy_load, FromEnv};
Expand All @@ -38,20 +34,7 @@ impl FromEnv for DAClientConfig {
},
}),
CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?),
EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(EigenConfig {
disperser_rpc: env::var("EIGENDA_DISPERSER_RPC")?,
settlement_layer_confirmation_depth: env::var(
"EIGENDA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH",
)?
.parse()?,
eigenda_eth_rpc: env::var("EIGENDA_EIGENDA_ETH_RPC")?,
eigenda_svc_manager_address: env::var("EIGENDA_EIGENDA_SVC_MANAGER_ADDRESS")?,
wait_for_finalization: env::var("EIGENDA_WAIT_FOR_FINALIZATION")?.parse()?,
authenticated: env::var("EIGENDA_AUTHENTICATED")?.parse()?,
g1_url: env::var("EIGENDA_G1_URL")?.parse()?,
g2_url: env::var("EIGENDA_G2_URL")?.parse()?,
chain_id: env::var("EIGENDA_CHAIN_ID")?.parse()?,
}),
EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(envy_load("da_eigen_config", "DA_")?),
OBJECT_STORE_CLIENT_CONFIG_NAME => {
Self::ObjectStore(envy_load("da_object_store", "DA_")?)
}
Expand Down Expand Up @@ -265,15 +248,15 @@ mod tests {
let mut lock = MUTEX.lock();
let config = r#"
DA_CLIENT="Eigen"
EIGENDA_DISPERSER_RPC="http://localhost:8080"
EIGENDA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH=0
EIGENDA_EIGENDA_ETH_RPC="http://localhost:8545"
EIGENDA_EIGENDA_SVC_MANAGER_ADDRESS="0x123"
EIGENDA_WAIT_FOR_FINALIZATION=true
EIGENDA_AUTHENTICATED=false
EIGENDA_G1_URL="resources1"
EIGENDA_G2_URL="resources2"
EIGENDA_CHAIN_ID=1
DA_DISPERSER_RPC="http://localhost:8080"
DA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH=0
DA_EIGENDA_ETH_RPC="http://localhost:8545"
DA_EIGENDA_SVC_MANAGER_ADDRESS="0x123"
DA_WAIT_FOR_FINALIZATION=true
DA_AUTHENTICATED=false
DA_G1_URL="resources1"
DA_G2_URL="resources2"
DA_CHAIN_ID=1
"#;
lock.set_env(config);

Expand All @@ -283,7 +266,7 @@ mod tests {
DAClientConfig::Eigen(EigenConfig {
disperser_rpc: "http://localhost:8080".to_string(),
settlement_layer_confirmation_depth: 0,
eigenda_eth_rpc: "http://localhost:8545".to_string(),
eigenda_eth_rpc: Some("http://localhost:8545".to_string()),
eigenda_svc_manager_address: "0x123".to_string(),
wait_for_finalization: true,
authenticated: false,
Expand Down
5 changes: 1 addition & 4 deletions core/lib/env_config/src/da_dispatcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,12 @@ mod tests {
interval: u32,
rows_limit: u32,
max_retries: u16,
max_concurrent_requests: u32,
) -> DADispatcherConfig {
DADispatcherConfig {
polling_interval_ms: Some(interval),
max_rows_to_dispatch: Some(rows_limit),
max_retries: Some(max_retries),
use_dummy_inclusion_data: Some(true),
max_concurrent_requests: Some(max_concurrent_requests),
}
}

Expand All @@ -40,10 +38,9 @@ mod tests {
DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60
DA_DISPATCHER_MAX_RETRIES=7
DA_DISPATCHER_USE_DUMMY_INCLUSION_DATA="true"
DA_DISPATCHER_MAX_CONCURRENT_REQUESTS=10
"#;
lock.set_env(config);
let actual = DADispatcherConfig::from_env().unwrap();
assert_eq!(actual, expected_da_layer_config(5000, 60, 7, 10));
assert_eq!(actual, expected_da_layer_config(5000, 60, 7));
}
}
6 changes: 2 additions & 4 deletions core/lib/protobuf_config/src/da_client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,7 @@ impl ProtoRepr for proto::DataAvailabilityClient {
&conf.settlement_layer_confirmation_depth,
)
.context("settlement_layer_confirmation_depth")?,
eigenda_eth_rpc: required(&conf.eigenda_eth_rpc)
.context("eigenda_eth_rpc")?
.clone(),
eigenda_eth_rpc: required(&conf.eigenda_eth_rpc).ok().cloned(),
eigenda_svc_manager_address: required(&conf.eigenda_svc_manager_address)
.context("eigenda_svc_manager_address")?
.clone(),
Expand Down Expand Up @@ -117,7 +115,7 @@ impl ProtoRepr for proto::DataAvailabilityClient {
settlement_layer_confirmation_depth: Some(
config.settlement_layer_confirmation_depth,
),
eigenda_eth_rpc: Some(config.eigenda_eth_rpc.clone()),
eigenda_eth_rpc: config.eigenda_eth_rpc.clone(),
eigenda_svc_manager_address: Some(config.eigenda_svc_manager_address.clone()),
wait_for_finalization: Some(config.wait_for_finalization),
authenticated: Some(config.authenticated),
Expand Down
2 changes: 0 additions & 2 deletions core/lib/protobuf_config/src/da_dispatcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher {
max_rows_to_dispatch: self.max_rows_to_dispatch,
max_retries: self.max_retries.map(|x| x as u16),
use_dummy_inclusion_data: self.use_dummy_inclusion_data,
max_concurrent_requests: self.max_concurrent_requests,
})
}

Expand All @@ -22,7 +21,6 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher {
max_rows_to_dispatch: this.max_rows_to_dispatch,
max_retries: this.max_retries.map(Into::into),
use_dummy_inclusion_data: this.use_dummy_inclusion_data,
max_concurrent_requests: this.max_concurrent_requests,
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,4 @@ message DataAvailabilityDispatcher {
optional uint32 max_rows_to_dispatch = 2;
optional uint32 max_retries = 3;
optional bool use_dummy_inclusion_data = 4;
optional uint32 max_concurrent_requests = 5;
}
16 changes: 11 additions & 5 deletions core/node/api_server/src/web3/namespaces/debug.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,16 +96,22 @@ impl DebugNamespace {
CallType::NearCall => unreachable!("We have to filter our near calls before"),
};

let (result, error) = if let Some(error) = call.revert_reason {
(None, Some(error))
} else {
(
let (result, error) = match (call.revert_reason, call.error) {
(Some(revert_reason), _) => {
// If revert_reason exists, it takes priority over VM error
(None, Some(revert_reason))
}
(None, Some(vm_error)) => {
// If no revert_reason but VM error exists
(None, Some(vm_error))
}
(None, None) => (
Some(CallResult {
output: web3::Bytes::from(call.output),
gas_used: U256::from(call.gas_used),
}),
None,
)
),
};

calls.push(DebugCallFlat {
Expand Down
3 changes: 3 additions & 0 deletions core/node/da_clients/src/eigen/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,6 @@ pub fn compile_protos() {

The generated folder is considered a temporary solution until the EigenDA has a library with either a protogen, or
preferably a full Rust client implementation.

Proto files are not included here to avoid confusion in case they fall out of date; the EigenDA
[repo](https://github.com/Layr-Labs/eigenda/tree/master/api/proto) is the source of truth for the proto files.
2 changes: 0 additions & 2 deletions core/node/da_dispatcher/src/da_dispatcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,6 @@ impl DataAvailabilityDispatcher {

for batch in batches {
let dispatch_latency = METRICS.blob_dispatch_latency.start();
METRICS.blobs_pending_dispatch.inc_by(1);
let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || {
self.client
.dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone())
Expand Down Expand Up @@ -115,7 +114,6 @@ impl DataAvailabilityDispatcher {
.set(batch.l1_batch_number.0 as usize);
METRICS.blob_size.observe(batch.pubdata.len());
METRICS.blobs_dispatched.inc_by(1);
METRICS.blobs_pending_dispatch.dec_by(1);

tracing::info!(
"Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency: {dispatch_latency_duration:?}",
Expand Down
2 changes: 0 additions & 2 deletions core/node/da_dispatcher/src/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,6 @@ pub(super) struct DataAvailabilityDispatcherMetrics {
/// Buckets are bytes ranging from 1 KB to 16 MB, which has to satisfy all blob size values.
#[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0), unit = Unit::Bytes)]
pub blob_size: Histogram<usize>,
/// Amount of pending blobs to be dispatched.
pub blobs_pending_dispatch: Gauge<usize>,
/// Total number of blobs dispatched.
pub blobs_dispatched: Gauge<usize>,
/// Total number of blobs included.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ impl WiringLayer for EigenWiringLayer {
}

async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
let master_pool = input.master_pool.get_custom(2).await?;
let master_pool = input.master_pool.get().await?;
let get_blob_from_db = GetBlobFromDB { pool: master_pool };
let client: Box<dyn DataAvailabilityClient> = Box::new(
EigenClientProxy::new(self.config, self.secrets, Box::new(get_blob_from_db)).await?,
Expand All @@ -71,7 +71,7 @@ impl GetBlobData for GetBlobFromDB {
async fn call(&self, input: &'_ str) -> Result<Option<Vec<u8>>, Box<dyn Error + Send + Sync>> {
let pool = self.pool.clone();
let input = input.to_string();
let mut conn = pool.connection_tagged("da_dispatcher").await?;
let mut conn = pool.connection_tagged("eigen_client").await?;
let batch = conn
.data_availability_dal()
.get_blob_data_by_blob_id(&input)
Expand Down
Loading

0 comments on commit 051c661

Please sign in to comment.