From 03e28ae3983ec064507e71138ae58361071e3a0e Mon Sep 17 00:00:00 2001 From: borispovod Date: Fri, 24 Nov 2023 03:06:15 +0400 Subject: [PATCH] feat: sequencer --- Cargo.lock | 250 ++++-- Cargo.toml | 62 +- bin/magi.rs | 201 +++-- bin/network.rs | 79 +- src/common/mod.rs | 207 ----- src/config/mod.rs | 1084 +++++++++++++++---------- src/derive/mod.rs | 149 ++-- src/derive/stages/attributes.rs | 557 ++++++++----- src/derive/stages/batches.rs | 35 +- src/derive/stages/channels.rs | 36 +- src/derive/state.rs | 45 +- src/driver/engine_driver.rs | 93 ++- src/driver/info.rs | 98 ++- src/driver/mod.rs | 447 +++++++--- src/engine/api.rs | 60 +- src/engine/payload.rs | 52 +- src/l1/mod.rs | 125 ++- src/lib.rs | 4 +- src/network/handlers/block_handler.rs | 160 +++- src/network/mod.rs | 9 + src/network/service/discovery.rs | 22 +- src/network/service/mod.rs | 80 +- src/network/signer.rs | 202 +++++ src/rpc/mod.rs | 70 +- src/telemetry/metrics.rs | 10 +- src/types/attributes.rs | 500 ++++++++++++ src/types/common.rs | 341 ++++++++ src/types/mod.rs | 3 + src/types/rpc.rs | 54 ++ 29 files changed, 3632 insertions(+), 1403 deletions(-) delete mode 100644 src/common/mod.rs create mode 100644 src/network/signer.rs create mode 100644 src/types/attributes.rs create mode 100644 src/types/common.rs create mode 100644 src/types/mod.rs create mode 100644 src/types/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index ffe3b4b8..4d81f765 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -200,6 +200,54 @@ dependencies = [ "winapi", ] +[[package]] +name = "anstream" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys", +] + [[package]] name = "anyhow" version = "1.0.71" @@ -387,9 +435,12 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", +] [[package]] name = "atomic-waker" @@ -397,17 +448,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "auto_impl" version = "1.1.0" @@ -524,9 +564,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitvec" @@ -624,6 +664,12 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +[[package]] +name = "bytemuck" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" + [[package]] name = "byteorder" version = "1.4.3" @@ -794,42 +840,43 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.25" +version = "4.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64" dependencies = [ - "atty", - "bitflags 1.3.2", + "clap_builder", "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc" +dependencies = [ + "anstream", + "anstyle", "clap_lex", - "indexmap 1.9.3", - "once_cell", "strsim", - "termcolor", - "textwrap", ] [[package]] name = "clap_derive" -version = "3.2.25" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", - "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.25", ] [[package]] name = "clap_lex" -version = "0.2.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "coins-bip32" @@ -887,6 +934,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "concurrent-queue" version = "2.2.0" @@ -2012,14 +2065,14 @@ checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "figment" -version = "0.10.10" +version = "0.10.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4547e226f4c9ab860571e070a9034192b3175580ecea38da34fcdb53a018c9a5" +checksum = "649f3e5d826594057e9a519626304d8da859ea8a0b18ce99500c586b8d45faee" dependencies = [ "atomic", "pear", "serde", - "toml 0.7.6", + "toml 0.8.8", "uncased", "version_check", ] @@ -2502,15 +2555,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name 
= "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.3.2" @@ -2875,7 +2919,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", "windows-sys", ] @@ -2904,8 +2948,8 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.4", + "hermit-abi", + "rustix 0.38.25", "windows-sys", ] @@ -3111,9 +3155,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libflate" @@ -3563,9 +3607,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lock_api" @@ -3607,6 +3651,7 @@ version = "0.1.0" dependencies = [ "again", "ansi_term", + "arc-swap", "async-trait", "bytes", "chrono", @@ -3635,7 +3680,10 @@ dependencies = [ "serde_json", "snap", "ssz_rs", + "tempfile", + "thiserror", "tokio", + "toml 0.8.8", "tracing", "tracing-appender", "tracing-subscriber", @@ -3994,7 +4042,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi", "libc", ] @@ -4143,12 +4191,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" -[[package]] -name = "os_str_bytes" -version = "6.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" - [[package]] name = "overload" version = "0.1.1" @@ -4891,6 +4933,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -5163,14 +5214,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys 0.4.11", "windows-sys", ] @@ -5480,9 +5531,9 @@ dependencies = [ [[package]] name = 
"serde_spanned" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ "serde", ] @@ -5926,14 +5977,14 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix 0.38.4", + "redox_syscall 0.4.1", + "rustix 0.38.25", "windows-sys", ] @@ -5948,21 +5999,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.43" @@ -6203,14 +6239,26 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.12", +] + +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -6225,7 +6273,20 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.4.9", +] + +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow 0.5.19", ] [[package]] @@ -6533,6 +6594,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" @@ -7127,6 +7194,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" diff --git a/Cargo.toml b/Cargo.toml index 2b50aeb4..6bd95c03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,9 @@ [package] +default-run = "magi" +edition = "2021" +license = "AGPL-3.0-only" name = "magi" version = "0.1.0" -license = 
"AGPL-3.0-only" -edition = "2021" -default-run = "magi" [[bin]] name = "magi" @@ -14,57 +14,63 @@ name = "network" path = "./bin/network.rs" [dependencies] -tokio = { version = "1.28.0", features = ["full"] } +again = "0.1" +arc-swap = "1.6.0" async-trait = "0.1.73" +ethers = {version = "2.0.8", features = ["optimism"]} eyre = "0.6.8" -ethers = { version = "2.0.8", features = ["optimism"] } +futures = "0.3.28" +futures-timer = "0.3.0" hex = "0.4.3" +jsonrpsee = {version = "0.17.0", features = ["server", "macros"]} libflate = "1.2.0" -openssl = { version = "0.10", features = ["vendored"] } once_cell = "1" -jsonrpsee = {version = "0.17.0", features = ["server", "macros"]} -futures = "0.3.28" -futures-timer = "0.3.0" -again = "0.1" +openssl = {version = "0.10", features = ["vendored"]} +tokio = {version = "1.28.0", features = ["full"]} +thiserror = "1.0" # Logging and Metrics +ansi_term = "0.12.1" chrono = "0.4.22" +lazy_static = "1.4.0" +prometheus_exporter = "0.8.5" tracing = "0.1.36" -ansi_term = "0.12.1" tracing-appender = "0.2.2" -tracing-subscriber = { version = "0.3.16", features = [ - "fmt", - "env-filter", - "ansi", - "tracing-log", -] } -prometheus_exporter = "0.8.5" -lazy_static = "1.4.0" +tracing-subscriber = {version = "0.3.16", features = [ + "fmt", + "env-filter", + "ansi", + "tracing-log", +]} # Serialization -serde = { version = "1.0.152", features = ["derive"] } +serde = {version = "1.0.152", features = ["derive"]} serde_json = "1.0.93" # Backend Crates -uuid = { version = "1.3.0", features = ["v4"] } bytes = "1.4.0" -reqwest = "0.11.14" jsonwebtoken = "8.2.0" rand = "0.8.5" +reqwest = "0.11.14" +uuid = {version = "1.3.0", features = ["v4"]} # Networking discv5 = "0.2.2" -libp2p = { version = "0.51.3", features = ["macros", "tokio", "tcp", "mplex", "noise", "gossipsub", "ping"] } -libp2p-identity = { version = "0.1.2", features = ["secp256k1"] } -unsigned-varint = "0.7.1" +libp2p = {version = "0.51.3", features = ["macros", "tokio", "tcp", "mplex", "noise", "gossipsub", "ping"]} +libp2p-identity = {version = "0.1.2", features = ["secp256k1"]} snap = "1" ssz_rs = "0.8.0" +unsigned-varint = "0.7.1" # CLI -figment = { version = "0.10.8", features = ["toml", "env"] } -ctrlc = { version = "3.2.3", features = ["termination"] } -clap = { version = "3.2.18", features = ["derive", "env"] } +clap = {version = "4.4.4", features = ["derive", "env", "string"]} +ctrlc = {version = "3.2.3", features = ["termination"]} dirs = "4.0.0" +figment = {version = "0.10.11", features = ["toml", "env"]} +toml = "0.8.2" + +[dev-dependencies] +tempfile = "3.8.1" [features] default = ["test-utils"] diff --git a/bin/magi.rs b/bin/magi.rs index e6a459dc..0f84a66b 100644 --- a/bin/magi.rs +++ b/bin/magi.rs @@ -1,16 +1,21 @@ -use std::path::PathBuf; -use std::{env::current_dir, process}; +use std::{fs, net::SocketAddr, path::PathBuf, process}; use clap::Parser; use dirs::home_dir; -use eyre::Result; +use discv5::enr::{CombinedKey, Enr}; +use eyre::{anyhow, Result}; +use libp2p_identity::secp256k1::SecretKey; +use serde::Serialize; use magi::{ - config::{ChainConfig, CliConfig, Config, SyncMode}, + config::{ + secret_key_from_hex, serialize_secret_key, ChainConfig, CliConfig, Config, ConfigBuilder, + SyncMode, + }, + network, runner::Runner, telemetry::{self, metrics}, }; -use serde::Serialize; #[tokio::main] async fn main() -> Result<()> { @@ -20,10 +25,11 @@ async fn main() -> Result<()> { let logs_dir = cli.logs_dir.clone(); let logs_rotation = cli.logs_rotation.clone(); let checkpoint_hash = 
cli.checkpoint_hash.clone();
-    let config = cli.to_config();
+    let metrics_listen = cli.metrics_listen;
+    let config = cli.to_config()?;
 
     let _guards = telemetry::init(verbose, logs_dir, logs_rotation);
-    metrics::init()?;
+    metrics::init(metrics_listen)?;
 
     let runner = Runner::from_config(config)
         .with_sync_mode(sync_mode)
@@ -37,7 +43,7 @@ async fn main() -> Result<()> {
     Ok(())
 }
 
-#[derive(Parser, Serialize)]
+#[derive(Debug, Parser, Serialize)]
 pub struct Cli {
     #[clap(short, long, default_value = "optimism")]
     network: String,
@@ -51,9 +57,6 @@ pub struct Cli {
     l2_engine_url: Option<String>,
     #[clap(long)]
     jwt_secret: Option<String>,
-    /// Path to a JWT secret to use for authenticated RPC endpoints
-    #[clap(long)]
-    jwt_file: Option<PathBuf>,
     #[clap(short = 'v', long)]
     verbose: bool,
     #[clap(short = 'p', long)]
@@ -68,64 +71,156 @@ pub struct Cli {
     checkpoint_sync_url: Option<String>,
     #[clap(long)]
     devnet: bool,
+    #[clap(long = "sequencer-enabled")]
+    sequencer_enabled: bool,
+    #[clap(long = "sequencer-max-safe-lag", default_value = "0")]
+    sequencer_max_safe_lag: String,
+
+    /// P2P listening address
+    #[clap(long, default_value = network::LISTENING_AS_STR)]
+    p2p_listen: SocketAddr,
+
+    /// Secret key Secp256k1 for P2P.
+    /// You can pass either a path to a key file or the value of the private key itself.
+    /// The private key must be in hexadecimal format with a length of 64 characters.
+    /// Example:
+    /// fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3
+    /// /path/to/secret_key
+    #[clap(
+        long,
+        value_parser = parse_secret_key_from_cli,
+        verbatim_doc_comment
+    )]
+    #[serde(
+        serialize_with = "serialize_secret_key",
+        skip_serializing_if = "Option::is_none"
+    )]
+    p2p_secret_key: Option<SecretKey>,
+
+    /// Bootnodes to connect to initially. A space-separated list of addresses in ENR format is expected.
+    ///
+    /// If not specified, the optimism mainnet bootnodes will be used.
+    ///
+    /// Example:
+    /// enr:_1 enr:_2 ... enr:_N
+    #[clap(
+        long,
+        verbatim_doc_comment,
+        value_delimiter = ' ',
+        num_args = 1..
+    )]
+    p2p_bootnodes: Option<Vec<Enr<CombinedKey>>>,
+
+    /// Secret key Secp256k1 for Sequencer.
+    /// You can pass either a path to a key file or the value of the private key itself.
+    /// The private key must be in hexadecimal format with a length of 64 characters.
+    /// Example:
+    /// fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3
+    /// /path/to/secret_key
+    #[clap(
+        long,
+        value_parser = parse_secret_key_from_cli,
+        verbatim_doc_comment
+    )]
+    #[serde(
+        serialize_with = "serialize_secret_key",
+        skip_serializing_if = "Option::is_none"
+    )]
+    p2p_sequencer_secret_key: Option<SecretKey>,
+
+    /// Metrics listening address.
+    /// This parameter is not saved as part of the config.
+    #[clap(long, default_value = metrics::LISTENING_AS_STR)]
+    metrics_listen: SocketAddr,
+
+    /// Specify the magi working directory. It will store all the necessary data for the launch.
+    #[clap(long, short = 'd', verbatim_doc_comment, default_value = default_working_dir())]
+    #[serde(skip)]
+    working_dir: PathBuf,
+
+    /// Save the configuration to launch Magi in the future.
+    /// The configuration will be saved as "magi.toml" in the working directory: <working_dir>/magi.toml
+    #[clap(long = "save", short = 's', verbatim_doc_comment)]
+    #[serde(skip)]
+    save_config: bool,
 }
 
 impl Cli {
-    pub fn to_config(self) -> Config {
-        let chain = match self.network.as_str() {
-            "optimism" => ChainConfig::optimism(),
-            "optimism-goerli" => ChainConfig::optimism_goerli(),
-            "optimism-sepolia" => ChainConfig::optimism_sepolia(),
-            "base" => ChainConfig::base(),
-            "base-goerli" => ChainConfig::base_goerli(),
-            file if file.ends_with(".json") => ChainConfig::from_json(file),
-            _ => panic!(
-                "Invalid network name. \\
-                Please use one of the following: 'optimism', 'optimism-goerli', 'base-goerli'. \\
-                You can also use a JSON file path for custom configuration."
-            ),
-        };
+    pub fn to_config(self) -> eyre::Result<Config> {
+        let chain = ChainConfig::try_from(self.network.as_str())?;
 
-        let config_path = home_dir().unwrap().join(".magi/magi.toml");
-        let cli_config = CliConfig::from(self);
-        Config::new(&config_path, cli_config, chain)
-    }
+        let mut work_dir = self.working_dir.clone();
+        if !work_dir.is_absolute() {
+            work_dir = std::env::current_dir()?.join(work_dir);
+        }
 
-    pub fn jwt_secret(&self) -> Option<String> {
-        self.jwt_secret.clone().or(self.jwt_secret_from_file())
-    }
+        let magi_config_path = work_dir.join("magi.toml");
+        let save = self.save_config;
+        let cli_config = CliConfig::try_from(self)?;
 
-    pub fn jwt_secret_from_file(&self) -> Option<String> {
-        let jwt_file = self.jwt_file.as_ref()?;
-        match std::fs::read_to_string(jwt_file) {
-            Ok(content) => Some(content),
-            Err(_) => Cli::default_jwt_secret(),
-        }
-    }
+        let config = ConfigBuilder::default()
+            .chain(chain)
+            .toml(&magi_config_path)
+            .cli(cli_config)
+            .build();
 
-    pub fn default_jwt_secret() -> Option<String> {
-        let cur_dir = current_dir().ok()?;
-        match std::fs::read_to_string(cur_dir.join("jwt.hex")) {
-            Ok(content) => Some(content),
-            Err(_) => {
-                tracing::error!(target: "magi", "Failed to read JWT secret from file: {:?}", cur_dir);
-                None
-            }
+        if save {
+            config.save(magi_config_path)?;
         }
+
+        Ok(config)
     }
 }
 
-impl From<Cli> for CliConfig {
-    fn from(value: Cli) -> Self {
-        let jwt_secret = value.jwt_secret();
-        Self {
+impl TryFrom<Cli> for CliConfig {
+    type Error = eyre::Report;
+
+    fn try_from(value: Cli) -> Result<Self> {
+        let sequencer = match value.sequencer_enabled {
+            true => Some(value.sequencer_max_safe_lag.try_into()?),
+            false => None,
+        };
+
+        Ok(Self {
             l1_rpc_url: value.l1_rpc_url,
             l2_rpc_url: value.l2_rpc_url,
             l2_engine_url: value.l2_engine_url,
-            jwt_secret,
+            jwt_secret: value.jwt_secret,
             checkpoint_sync_url: value.checkpoint_sync_url,
             rpc_port: value.rpc_port,
             devnet: value.devnet,
-        }
+            sequencer,
+            p2p_secret_key: value.p2p_secret_key,
+            p2p_listen: value.p2p_listen,
+            p2p_bootnodes: value.p2p_bootnodes,
+            p2p_sequencer_secret_key: value.p2p_sequencer_secret_key,
+        })
     }
 }
+
+/// The incoming value is either a path to a key file or a string with the key itself.
+/// The private key must be in hexadecimal format with a length of 64 characters.
+fn parse_secret_key_from_cli(value: &str) -> Result<SecretKey> {
+    secret_key_from_hex(value).or_else(|err| {
+        let path = PathBuf::try_from(value).map_err(|_| err)?;
+        let key_string = fs::read_to_string(&path)
+            .map_err(|_| anyhow!("The key file {path:?} was not found."))?
+            .trim()
+            .to_string();
+
+        let key = secret_key_from_hex(&key_string)?;
+
+        Ok(key)
+    })
+}
+
+fn default_working_dir() -> String {
+    home_dir()
+        .expect(
+            "Could not determine the home directory in the operating system. \
+             Specify the working directory using the \"--working-dir\" parameter",
+        )
+        .join(".magi/")
+        .display()
+        .to_string()
+}
diff --git a/bin/network.rs b/bin/network.rs
index 4b6a38e5..d1f24571 100644
--- a/bin/network.rs
+++ b/bin/network.rs
@@ -1,28 +1,91 @@
-use std::str::FromStr;
+#![allow(unused_imports)]
+use std::{net::Ipv4Addr, str::FromStr};
 
+use discv5::{
+    enr::{CombinedKey, EnrBuilder},
+    Enr,
+};
 use ethers::types::Address;
 use eyre::Result;
+use ethers::utils::rlp;
+use libp2p::gossipsub::IdentTopic;
 use magi::{
-    network::{handlers::block_handler::BlockHandler, service::Service},
+    network::{handlers::block_handler::BlockHandler, handlers::Handler, service::Service},
     telemetry,
 };
 use tokio::sync::watch;
+use unsigned_varint::encode;
+
+#[derive(Debug)]
+pub struct OpStackEnrData {
+    chain_id: u64,
+    version: u64,
+}
+
+impl From<OpStackEnrData> for Vec<u8> {
+    fn from(value: OpStackEnrData) -> Vec<u8> {
+        let mut chain_id_buf = encode::u128_buffer();
+        let chain_id_slice = encode::u128(value.chain_id as u128, &mut chain_id_buf);
+
+        let mut version_buf = encode::u128_buffer();
+        let version_slice = encode::u128(value.version as u128, &mut version_buf);
+
+        let opstack = [chain_id_slice, version_slice].concat();
+
+        rlp::encode(&opstack).to_vec()
+    }
+}
 
 #[tokio::main]
 async fn main() -> Result<()> {
     let _guards = telemetry::init(false, None, None);
-    let addr = "0.0.0.0:9876".parse()?;
-    let chain_id = 420;
+    let addr = "0.0.0.0:9221".parse()?;
+    let chain_id = 901;
     let (_, recv) = watch::channel(Address::from_str(
-        "0x715b7219d986641df9efd9c7ef01218d528e19ec",
+        "0xF64c29538cAE4E69eac62c50CDfebAC22b378044",
     )?);
     let (block_handler, block_recv) = BlockHandler::new(chain_id, recv);
 
+    // channel for sending new blocks to peers
+    let (_sender, receiver) = tokio::sync::mpsc::channel(1_000);
+
+    // To generate a new ENR, uncomment the following code and add a private key.
+    // Generate private key.
+    // let mut pk =
+    //     hex::decode("private key")?;
+    // let private_key = CombinedKey::secp256k1_from_bytes(&mut pk)?;
+
+    // // Get RLP for optimism.
+    // let opstack = OpStackEnrData {
+    //     chain_id,
+    //     version: 0,
+    // };
+    // let opstack_data: Vec<u8> = opstack.into();
+
+    // // Get ENR.
+ // let enr = EnrBuilder::new("v4") + // .add_value_rlp("opstack", opstack_data.into()) + // .ip4(Ipv4Addr::new(127, 0, 0, 1)) + // .tcp4(9980) + // .udp4(9980) + // .build(&private_key)?; + // println!("ENR: {:?}", enr); + // let bootnodes = vec![enr]; + + let bootnodes: Vec> = + vec![Enr::from_str("enr:-Je4QKqISnjZwcUSRQqLTbOoqFvmQX8sVlPIud5sWPrUp_8hPJXnzSyY-fqXhzqWGKDHjNSLJRbBGjC9VILm_HGuhHkBgmlkgnY0gmlwhH8AAAGHb3BzdGFja4OFBwCJc2VjcDI1NmsxoQMqv564GlblO4zWKiGSn0-lcr70dYrzwiieFETLNEy8xoN0Y3CCJvyDdWRwgib8").map_err(|e| eyre::eyre!("err: {}", e))?]; - Service::new(addr, chain_id) - .add_handler(Box::new(block_handler)) - .start()?; + Service::new( + addr, + chain_id, + Some(bootnodes), + None, + None, + IdentTopic::new(block_handler.topic().to_string()), + ) + .add_handler(Box::new(block_handler)) + .start(receiver)?; while let Ok(payload) = block_recv.recv() { tracing::info!("received unsafe block with hash: {:?}", payload.block_hash); diff --git a/src/common/mod.rs b/src/common/mod.rs deleted file mode 100644 index 34dce8f3..00000000 --- a/src/common/mod.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::fmt::Debug; - -use ethers::{ - abi::parse_abi_str, - prelude::BaseContract, - types::{Block, Bytes, Transaction, H256, U256}, - utils::rlp::{Decodable, DecoderError, Rlp}, -}; -use eyre::Result; -use figment::value::{Dict, Tag, Value}; -use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; - -use crate::engine::ExecutionPayload; - -/// Selected block header info -#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] -pub struct BlockInfo { - pub hash: H256, - pub number: u64, - pub parent_hash: H256, - pub timestamp: u64, -} - -/// A raw transaction -#[derive(Clone, PartialEq, Eq)] -pub struct RawTransaction(pub Vec); - -/// L1 epoch block -#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -pub struct Epoch { - pub number: u64, - pub hash: H256, - pub timestamp: u64, -} - -impl From for Value { - fn from(value: BlockInfo) -> Value { - let mut dict = Dict::new(); - dict.insert("hash".to_string(), Value::from(value.hash.as_bytes())); - dict.insert("number".to_string(), Value::from(value.number)); - dict.insert("timestamp".to_string(), Value::from(value.timestamp)); - dict.insert( - "parent_hash".to_string(), - Value::from(value.parent_hash.as_bytes()), - ); - Value::Dict(Tag::Default, dict) - } -} - -impl TryFrom> for BlockInfo { - type Error = eyre::Report; - - fn try_from(block: Block) -> Result { - let number = block - .number - .ok_or(eyre::eyre!("block not included"))? 
- .as_u64(); - - let hash = block.hash.ok_or(eyre::eyre!("block not included"))?; - - Ok(BlockInfo { - number, - hash, - parent_hash: block.parent_hash, - timestamp: block.timestamp.as_u64(), - }) - } -} - -impl From for Value { - fn from(value: Epoch) -> Self { - let mut dict = Dict::new(); - dict.insert("hash".to_string(), Value::from(value.hash.as_bytes())); - dict.insert("number".to_string(), Value::from(value.number)); - dict.insert("timestamp".to_string(), Value::from(value.timestamp)); - Value::Dict(Tag::Default, dict) - } -} - -impl From<&ExecutionPayload> for BlockInfo { - fn from(value: &ExecutionPayload) -> Self { - Self { - number: value.block_number.as_u64(), - hash: value.block_hash, - parent_hash: value.parent_hash, - timestamp: value.timestamp.as_u64(), - } - } -} - -pub struct AttributesDepositedCall { - pub number: u64, - pub timestamp: u64, - pub basefee: U256, - pub hash: H256, - pub sequence_number: u64, - pub batcher_hash: H256, - pub fee_overhead: U256, - pub fee_scalar: U256, -} - -type SetL1BlockValueInput = (u64, u64, U256, H256, u64, H256, U256, U256); -const L1_BLOCK_CONTRACT_ABI: &str = r#"[ - function setL1BlockValues(uint64 _number,uint64 _timestamp, uint256 _basefee, bytes32 _hash,uint64 _sequenceNumber,bytes32 _batcherHash,uint256 _l1FeeOverhead,uint256 _l1FeeScalar) external -]"#; - -impl TryFrom for AttributesDepositedCall { - type Error = eyre::Report; - - fn try_from(value: Bytes) -> Result { - let abi = BaseContract::from(parse_abi_str(L1_BLOCK_CONTRACT_ABI)?); - - let ( - number, - timestamp, - basefee, - hash, - sequence_number, - batcher_hash, - fee_overhead, - fee_scalar, - ): SetL1BlockValueInput = abi.decode("setL1BlockValues", value)?; - - Ok(Self { - number, - timestamp, - basefee, - hash, - sequence_number, - batcher_hash, - fee_overhead, - fee_scalar, - }) - } -} - -impl From<&AttributesDepositedCall> for Epoch { - fn from(call: &AttributesDepositedCall) -> Self { - Self { - number: call.number, - timestamp: call.timestamp, - hash: call.hash, - } - } -} - -impl Decodable for RawTransaction { - fn decode(rlp: &Rlp) -> Result { - let tx_bytes: Vec = rlp.as_val()?; - Ok(Self(tx_bytes)) - } -} - -impl Debug for RawTransaction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "0x{}", hex::encode(&self.0)) - } -} - -impl Serialize for RawTransaction { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_str(&format!("0x{}", hex::encode(&self.0))) - } -} - -impl<'de> Deserialize<'de> for RawTransaction { - fn deserialize>(deserializer: D) -> Result { - let tx: String = serde::Deserialize::deserialize(deserializer)?; - let tx = tx.strip_prefix("0x").unwrap_or(&tx); - Ok(RawTransaction(hex::decode(tx).map_err(D::Error::custom)?)) - } -} - -#[cfg(test)] -mod tests { - mod attributed_deposited_call { - use std::str::FromStr; - - use ethers::types::{Bytes, H256}; - - use crate::common::AttributesDepositedCall; - - #[test] - fn decode_from_bytes() -> eyre::Result<()> { - // Arrange - let calldata = 
"0x015d8eb900000000000000000000000000000000000000000000000000000000008768240000000000000000000000000000000000000000000000000000000064443450000000000000000000000000000000000000000000000000000000000000000e0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d00000000000000000000000000000000000000000000000000000000000000050000000000000000000000007431310e026b69bfc676c0013e12a1a11411eec9000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240"; - - let expected_hash = - H256::from_str("0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d")?; - let expected_block_number = 8874020; - let expected_timestamp = 1682191440; - - // Act - let call = AttributesDepositedCall::try_from(Bytes::from_str(calldata)?); - - // Assert - assert!(call.is_ok()); - let call = call.unwrap(); - - assert_eq!(call.hash, expected_hash); - assert_eq!(call.number, expected_block_number); - assert_eq!(call.timestamp, expected_timestamp); - - Ok(()) - } - } -} diff --git a/src/config/mod.rs b/src/config/mod.rs index e3f86455..c19ce559 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -1,13 +1,19 @@ -use std::{iter, path::PathBuf, process::exit, str::FromStr}; +use std::{fmt::Debug, fs, iter, net::SocketAddr, path::Path, str::FromStr}; +use discv5::enr::{CombinedKey, Enr}; use ethers::types::{Address, H256, U256}; +use eyre::{anyhow, ensure, Result}; use figment::{ providers::{Format, Serialized, Toml}, - Figment, + Figment, Provider, }; -use serde::{Deserialize, Serialize}; +use libp2p_identity::secp256k1::SecretKey; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::common::{BlockInfo, Epoch}; +use crate::{ + network, + types::common::{BlockInfo, Epoch}, +}; /// Sync Mode Specifies how `magi` should sync the L2 chain #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] @@ -36,61 +42,234 @@ impl FromStr for SyncMode { } } -/// A system configuration -#[derive(Debug, Clone, Deserialize)] +/// Sequencer config. +/// The tuple is maximum lag between safe L2 block (confirmed by L1) and new block. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct SequencerConfig(u64); + +impl SequencerConfig { + pub fn max_safe_lag(&self) -> u64 { + self.0 + } +} + +impl TryFrom for SequencerConfig { + type Error = eyre::Report; + + fn try_from(value: String) -> Result { + let max_safe_lag = value + .parse() + .map_err(|_| eyre::eyre!("failed to parse sequencer max safe lag"))?; + + Ok(SequencerConfig(max_safe_lag)) + } +} + +/// A system configuration. Can be built by combining different sources using `ConfigBuilder`. +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// The base chain RPC URL pub l1_rpc_url: String, + /// The L2 chain RPC URL pub l2_rpc_url: String, + /// The L2 engine API URL pub l2_engine_url: String, + /// The base chain config pub chain: ChainConfig, + /// Engine API JWT Secret /// This is used to authenticate with the engine API + #[serde(skip_serializing_if = "String::is_empty")] pub jwt_secret: String, + /// A trusted L2 RPC URL to use for fast/checkpoint syncing pub checkpoint_sync_url: Option, + /// The port of RPC server pub rpc_port: u16, - /// The devnet mode. + + /// The devnet mode pub devnet: bool, + + /// The sequencer config. + pub sequencer: Option, + + /// P2P listening address + pub p2p_listen: SocketAddr, + + // Secret key Secp256k1 for P2P. 
+ #[serde( + default, + serialize_with = "serialize_secret_key", + deserialize_with = "deserialize_secret_key", + skip_serializing_if = "Option::is_none" + )] + pub p2p_secret_key: Option, + + /// Bootnodes to which you need to connect initially. + #[serde(skip_serializing_if = "Option::is_none")] + pub p2p_bootnodes: Option>>, + + #[serde( + default, + serialize_with = "serialize_secret_key", + deserialize_with = "deserialize_secret_key", + skip_serializing_if = "Option::is_none" + )] + pub p2p_sequencer_secret_key: Option, } impl Config { - pub fn new(config_path: &PathBuf, cli_config: CliConfig, chain: ChainConfig) -> Self { - let defaults_provider = Serialized::defaults(DefaultsProvider::default()); + pub fn save(&self, path: impl AsRef + Debug) -> Result<()> { + if path.as_ref().exists() { + ensure!( + path.as_ref().is_file(), + "An incorrect configuration path {path:?} was passed" + ); + } else { + let dir = path.as_ref() + .parent() + .ok_or(anyhow!("An incorrect configuration path {path:?} was passed. Only the filename was specified."))?; + if !dir.exists() { + fs::create_dir_all(dir)?; + } + } + + let config_as_string = toml::to_string(self)?; + + fs::write(path, config_as_string)?; + Ok(()) + } +} + +impl Default for Config { + fn default() -> Self { + Self { + l1_rpc_url: "http://127.0.0.1:8545".to_string(), + l2_rpc_url: "http://127.0.0.1:9545".to_string(), + l2_engine_url: "http://127.0.0.1:9551".to_string(), + chain: ChainConfig::optimism(), + jwt_secret: Default::default(), + checkpoint_sync_url: None, + rpc_port: 7545, + devnet: false, + sequencer: None, + p2p_listen: *network::LISTENING, + p2p_secret_key: None, + p2p_bootnodes: None, + p2p_sequencer_secret_key: None, + } + } +} + +pub struct ConfigBuilder { + figment: Figment, +} + +/// System configuration builder +impl ConfigBuilder { + pub fn defaults(self, defaults: impl Serialize) -> Self { + self.merge(Serialized::defaults(defaults)) + } + + pub fn chain(self, chain: ChainConfig) -> Self { let chain_provider: Serialized = chain.into(); - let toml_provider = Toml::file(config_path).nested(); - let cli_provider = Serialized::defaults(cli_config); - - let config_res = Figment::new() - .merge(defaults_provider) - .merge(chain_provider) - .merge(toml_provider) - .merge(cli_provider) - .extract(); - - match config_res { - Ok(config) => config, - Err(err) => { + self.merge(chain_provider) + } + + pub fn toml(self, toml_path: impl AsRef + Debug) -> Self { + let mut toml_provider = Toml::file(&toml_path); + if !toml_path.as_ref().exists() { + toml_provider = toml_provider.nested(); + } + self.toml_internal(toml_provider) + } + + pub fn cli(self, cli_config: CliConfig) -> Self { + self.merge(Serialized::defaults(cli_config)) + } + + pub fn build(self) -> Config { + self.figment.extract().unwrap_or_else(|err| { match err.kind { figment::error::Kind::MissingField(field) => { let field = field.replace('_', "-"); - println!("\x1b[91merror\x1b[0m: missing configuration field: {field}"); - println!("\n\ttry supplying the propoper command line argument: --{field}"); - println!("\talternatively, you can add the field to your magi.toml file or as an environment variable"); - println!("\nfor more information, check the github README"); + eprintln!("\x1b[91merror\x1b[0m: missing configuration field: {field}"); + eprintln!("\n\ttry supplying the proper command line argument: --{field}"); + eprintln!("\talternatively, you can add the field to your magi.toml file or as an environment variable"); + eprintln!("\nfor more information, check 
the github README"); } - _ => println!("cannot parse configuration: {err}"), + _ => eprintln!("cannot parse configuration: {err}"), } - exit(1); - } + std::process::exit(1); + }) + } + + fn toml_internal(self, toml_provider: figment::providers::Data) -> Self { + self.merge(toml_provider) + } + + fn merge(self, provider: impl Provider) -> Self { + Self { + figment: self.figment.merge(provider), } } } +impl Default for ConfigBuilder { + fn default() -> Self { + Self { + figment: Figment::new().merge(Serialized::defaults(Config::default())), + } + } +} + +pub fn serialize_secret_key(secret_key: &Option, s: S) -> Result +where + S: Serializer, +{ + let secret_key_bytes = secret_key.as_ref().map(|v| v.to_bytes()); + let secret_key_hex = secret_key_bytes.map(hex::encode); + s.serialize_some(&secret_key_hex) +} + +pub fn deserialize_secret_key<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let hex_key: Option = Option::deserialize(deserializer)?; + let secret_key = match hex_key { + Some(hex_string) => { + Some(secret_key_from_hex(&hex_string).map_err(serde::de::Error::custom)?) + } + None => None, + }; + + Ok(secret_key) +} + +pub fn secret_key_from_hex(value: &str) -> eyre::Result { + let bytes: [u8; 32] = hex::decode(value) + .ok() + .and_then(|bytes| bytes.try_into().ok()) + .ok_or(anyhow!("Invalid private key passed"))?; + + let secret_key = SecretKey::try_from_bytes(bytes)?; + Ok(secret_key) +} + +pub fn serialize_u256_with_leading_zeroes(value: &U256, s: S) -> Result +where + S: Serializer, +{ + let mut buf = [0; 32]; + value.to_big_endian(&mut buf); + s.serialize_some(&H256::from_slice(&buf)) +} + /// Chain config items derived from the CLI #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CliConfig { @@ -104,69 +283,123 @@ pub struct CliConfig { pub jwt_secret: Option, #[serde(skip_serializing_if = "Option::is_none")] pub checkpoint_sync_url: Option, + #[serde(default, skip_serializing_if = "std::ops::Not::not")] + pub devnet: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub sequencer: Option, #[serde(skip_serializing_if = "Option::is_none")] pub rpc_port: Option, - #[serde(default)] - pub devnet: bool, + + /// Secret key Secp256k1 for P2P. + #[serde( + skip_serializing_if = "Option::is_none", + serialize_with = "serialize_secret_key", + deserialize_with = "deserialize_secret_key" + )] + pub p2p_secret_key: Option, + + /// P2P listening address + pub p2p_listen: SocketAddr, + + /// Bootnodes to which you need to connect initially + #[serde(skip_serializing_if = "Option::is_none")] + pub p2p_bootnodes: Option>>, + + /// Secret key Secp256k1 for Sequencer. + #[serde( + skip_serializing_if = "Option::is_none", + serialize_with = "serialize_secret_key", + deserialize_with = "deserialize_secret_key" + )] + pub p2p_sequencer_secret_key: Option, } /// A Chain Configuration -#[derive(Debug, Clone, Serialize, Deserialize)] +/// +/// This structure is also used to parse external chain configs from JSON. +/// This interface extends the default output of the `op-node` genesis devnet +/// setup command `--outfile.rollup` flag. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct ChainConfig {
     /// The network name
+    #[serde(default = "default_network")]
     pub network: String,
     /// The L1 chain id
     pub l1_chain_id: u64,
     /// The L2 chain id
     pub l2_chain_id: u64,
-    /// The L1 block referenced by the L2 chain
-    pub l1_start_epoch: Epoch,
-    /// The L2 genesis block info
-    pub l2_genesis: BlockInfo,
-    /// The initial system config value
-    pub system_config: SystemConfig,
-    /// The batch inbox address
-    pub batch_inbox: Address,
-    /// The deposit contract address
-    pub deposit_contract: Address,
-    /// The L1 system config contract
-    pub system_config_contract: Address,
+    /// Genesis configuration
+    pub genesis: GenesisInfo,
+    /// Network block time
+    #[serde(default = "default_block_time")]
+    pub block_time: u64,
+    /// Maximum timestamp drift
+    pub max_sequencer_drift: u64,
+    /// Number of L1 blocks in a sequence window
+    pub seq_window_size: u64,
     /// The maximum byte size of all pending channels
+    #[serde(default = "default_max_channel_size")]
     pub max_channel_size: u64,
     /// The max timeout for a channel (as measured by the frame L1 block number)
     pub channel_timeout: u64,
-    /// Number of L1 blocks in a sequence window
-    pub seq_window_size: u64,
-    /// Maximum timestamp drift
-    pub max_seq_drift: u64,
     /// Timestamp of the regolith hardfork
     pub regolith_time: u64,
-    /// Network blocktime
-    #[serde(default = "default_blocktime")]
-    pub blocktime: u64,
+    /// The batch inbox address
+    pub batch_inbox_address: Address,
+    /// The deposit contract address
+    pub deposit_contract_address: Address,
+    /// The L1 system config contract
+    pub l1_system_config_address: Address,
     /// L2 To L1 Message passer address
+    #[serde(default = "default_l2_to_l1_message_passer")]
     pub l2_to_l1_message_passer: Address,
 }
 
+impl TryFrom<&str> for ChainConfig {
+    type Error = eyre::Report;
+
+    fn try_from(value: &str) -> Result<Self> {
+        let chain = match value {
+            "optimism" => ChainConfig::optimism(),
+            "optimism-goerli" => ChainConfig::optimism_goerli(),
+            "optimism-sepolia" => ChainConfig::optimism_sepolia(),
+            "base" => ChainConfig::base(),
+            "base-goerli" => ChainConfig::base_goerli(),
+            file if file.ends_with(".json") => ChainConfig::from_json(file),
+            _ => eyre::bail!(
+                "Invalid network name. \\
+                Please use one of the following: 'optimism', 'optimism-goerli', 'optimism-sepolia', 'base', 'base-goerli'. \\
+                You can also use a JSON file path for custom configuration."
+ ), + }; + + Ok(chain) + } +} + /// Optimism system config contract values -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub struct SystemConfig { /// Batch sender address - pub batch_sender: Address, + pub batcher_addr: Address, /// L2 gas limit - pub gas_limit: U256, - /// Fee overhead - pub l1_fee_overhead: U256, - /// Fee scalar - pub l1_fee_scalar: U256, + pub gas_limit: u64, + /// L1 fee overhead + #[serde(serialize_with = "serialize_u256_with_leading_zeroes")] + pub overhead: U256, + /// L1 fee scalar + #[serde(serialize_with = "serialize_u256_with_leading_zeroes")] + pub scalar: U256, /// Sequencer's signer for unsafe blocks + #[serde(default)] pub unsafe_block_signer: Address, } impl SystemConfig { /// Encoded batch sender as a H256 pub fn batcher_hash(&self) -> H256 { - let mut batch_sender_bytes = self.batch_sender.as_bytes().to_vec(); + let mut batch_sender_bytes = self.batcher_addr.as_bytes().to_vec(); let mut batcher_hash = iter::repeat(0).take(12).collect::>(); batcher_hash.append(&mut batch_sender_bytes); H256::from_slice(&batcher_hash) @@ -192,29 +425,11 @@ impl From for Serialized { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -struct DefaultsProvider { - l2_rpc_url: String, - l2_engine_url: String, - rpc_port: u16, -} - -impl Default for DefaultsProvider { - fn default() -> Self { - Self { - l2_rpc_url: "http://127.0.0.1:8545".to_string(), - l2_engine_url: "http://127.0.0.1:8551".to_string(), - rpc_port: 9545, - } - } -} - impl ChainConfig { /// Read and parse a chain config object from a JSON file path pub fn from_json(path: &str) -> Self { let file = std::fs::File::open(path).unwrap(); - let external: ExternalChainConfig = serde_json::from_reader(file).unwrap(); - external.into() + serde_json::from_reader(file).unwrap() } pub fn optimism() -> Self { @@ -222,35 +437,41 @@ impl ChainConfig { network: "optimism".to_string(), l1_chain_id: 1, l2_chain_id: 10, - l1_start_epoch: Epoch { - hash: hash("0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"), - number: 17422590, - timestamp: 1686068903, - }, - l2_genesis: BlockInfo { - hash: hash("0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"), - number: 105235063, - parent_hash: hash( - "0x21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50", - ), - timestamp: 1686068903, - }, - system_config: SystemConfig { - batch_sender: addr("0x6887246668a3b87f54deb3b94ba47a6f63f32985"), - gas_limit: U256::from(30_000_000), - l1_fee_overhead: U256::from(188), - l1_fee_scalar: U256::from(684000), - unsafe_block_signer: addr("0xAAAA45d9549EDA09E70937013520214382Ffc4A2"), + genesis: GenesisInfo { + l1: ChainGenesisInfo { + hash: hash( + "0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108", + ), + number: 17422590, + parent_hash: H256::zero(), + }, + l2: ChainGenesisInfo { + hash: hash( + "0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3", + ), + number: 105235063, + parent_hash: hash( + "0x21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50", + ), + }, + l2_time: 1686068903, + system_config: SystemConfig { + batcher_addr: addr("0x6887246668a3b87f54deb3b94ba47a6f63f32985"), + gas_limit: 30_000_000, + overhead: U256::from(188), + scalar: U256::from(684000), + unsafe_block_signer: addr("0xAAAA45d9549EDA09E70937013520214382Ffc4A2"), + }, }, - batch_inbox: addr("0xff00000000000000000000000000000000000010"), - deposit_contract: 
addr("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"), - system_config_contract: addr("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"), + batch_inbox_address: addr("0xff00000000000000000000000000000000000010"), + deposit_contract_address: addr("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"), + l1_system_config_address: addr("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"), l2_to_l1_message_passer: addr("0x4200000000000000000000000000000000000016"), max_channel_size: 100_000_000, channel_timeout: 300, seq_window_size: 3600, - max_seq_drift: 600, - blocktime: 2, + max_sequencer_drift: 600, + block_time: 2, regolith_time: 0, } } @@ -260,73 +481,86 @@ impl ChainConfig { network: "optimism-goerli".to_string(), l1_chain_id: 5, l2_chain_id: 420, - l1_start_epoch: Epoch { - hash: hash("0x6ffc1bf3754c01f6bb9fe057c1578b87a8571ce2e9be5ca14bace6eccfd336c7"), - number: 8300214, - timestamp: 1673550516, - }, - l2_genesis: BlockInfo { - hash: hash("0x0f783549ea4313b784eadd9b8e8a69913b368b7366363ea814d7707ac505175f"), - number: 4061224, - parent_hash: hash( - "0x31267a44f1422f4cab59b076548c075e79bd59e691a23fbce027f572a2a49dc9", - ), - timestamp: 1673550516, - }, - system_config: SystemConfig { - batch_sender: addr("0x7431310e026b69bfc676c0013e12a1a11411eec9"), - gas_limit: U256::from(25_000_000), - l1_fee_overhead: U256::from(2100), - l1_fee_scalar: U256::from(1000000), - unsafe_block_signer: addr("0x715b7219D986641DF9eFd9C7Ef01218D528e19ec"), + genesis: GenesisInfo { + l1: ChainGenesisInfo { + hash: hash( + "0x6ffc1bf3754c01f6bb9fe057c1578b87a8571ce2e9be5ca14bace6eccfd336c7", + ), + number: 8300214, + parent_hash: H256::zero(), + }, + l2: ChainGenesisInfo { + hash: hash( + "0x0f783549ea4313b784eadd9b8e8a69913b368b7366363ea814d7707ac505175f", + ), + number: 4061224, + parent_hash: hash( + "0x31267a44f1422f4cab59b076548c075e79bd59e691a23fbce027f572a2a49dc9", + ), + }, + l2_time: 1673550516, + system_config: SystemConfig { + batcher_addr: addr("0x7431310e026b69bfc676c0013e12a1a11411eec9"), + gas_limit: 25_000_000, + overhead: U256::from(2100), + scalar: U256::from(1000000), + unsafe_block_signer: addr("0x715b7219D986641DF9eFd9C7Ef01218D528e19ec"), + }, }, - system_config_contract: addr("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"), - batch_inbox: addr("0xff00000000000000000000000000000000000420"), - deposit_contract: addr("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"), + l1_system_config_address: addr("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"), + batch_inbox_address: addr("0xff00000000000000000000000000000000000420"), + deposit_contract_address: addr("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"), l2_to_l1_message_passer: addr("0xEF2ec5A5465f075E010BE70966a8667c94BCe15a"), max_channel_size: 100_000_000, channel_timeout: 300, seq_window_size: 3600, - max_seq_drift: 600, + max_sequencer_drift: 600, regolith_time: 1679079600, - blocktime: 2, + block_time: 2, } } + pub fn optimism_sepolia() -> Self { Self { network: "optimism-sepolia".to_string(), l1_chain_id: 11155111, l2_chain_id: 11155420, - l1_start_epoch: Epoch { - hash: hash("0x48f520cf4ddaf34c8336e6e490632ea3cf1e5e93b0b2bc6e917557e31845371b"), - number: 4071408, - timestamp: 1691802540, - }, - l2_genesis: BlockInfo { - hash: hash("0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d"), - number: 0, - parent_hash: hash( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - timestamp: 1691802540, - }, - system_config: SystemConfig { - batch_sender: addr("0x8F23BB38F531600e5d8FDDaAEC41F13FaB46E98c"), - gas_limit: 
U256::from(30_000_000), - l1_fee_overhead: U256::from(188), - l1_fee_scalar: U256::from(684000), - unsafe_block_signer: addr("0x0000000000000000000000000000000000000000"), + genesis: GenesisInfo { + l1: ChainGenesisInfo { + hash: hash( + "0x48f520cf4ddaf34c8336e6e490632ea3cf1e5e93b0b2bc6e917557e31845371b", + ), + number: 4071408, + parent_hash: H256::zero(), + }, + l2: ChainGenesisInfo { + hash: hash( + "0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d", + ), + number: 0, + parent_hash: hash( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + }, + l2_time: 1691802540, + system_config: SystemConfig { + batcher_addr: addr("0x8F23BB38F531600e5d8FDDaAEC41F13FaB46E98c"), + gas_limit: 30_000_000, + overhead: U256::from(188), + scalar: U256::from(684000), + unsafe_block_signer: addr("0x0000000000000000000000000000000000000000"), + }, }, - system_config_contract: addr("0x034edd2a225f7f429a63e0f1d2084b9e0a93b538"), - batch_inbox: addr("0xff00000000000000000000000000000011155420"), - deposit_contract: addr("0x16fc5058f25648194471939df75cf27a2fdc48bc"), + l1_system_config_address: addr("0x034edd2a225f7f429a63e0f1d2084b9e0a93b538"), + batch_inbox_address: addr("0xff00000000000000000000000000000011155420"), + deposit_contract_address: addr("0x16fc5058f25648194471939df75cf27a2fdc48bc"), l2_to_l1_message_passer: addr("0x4200000000000000000000000000000000000016"), max_channel_size: 100_000_000, channel_timeout: 300, seq_window_size: 3600, - max_seq_drift: 600, + max_sequencer_drift: 600, regolith_time: 0, - blocktime: 2, + block_time: 2, } } @@ -335,33 +569,39 @@ impl ChainConfig { network: "base".to_string(), l1_chain_id: 1, l2_chain_id: 8453, - l1_start_epoch: Epoch { - number: 17481768, - hash: hash("0x5c13d307623a926cd31415036c8b7fa14572f9dac64528e857a470511fc30771"), - timestamp: 1686789347, - }, - l2_genesis: BlockInfo { - hash: hash("0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd"), - number: 0, - parent_hash: H256::zero(), - timestamp: 1686789347, - }, - system_config: SystemConfig { - batch_sender: addr("0x5050f69a9786f081509234f1a7f4684b5e5b76c9"), - gas_limit: U256::from(30000000), - l1_fee_overhead: U256::from(188), - l1_fee_scalar: U256::from(684000), - unsafe_block_signer: addr("0xAf6E19BE0F9cE7f8afd49a1824851023A8249e8a"), + genesis: GenesisInfo { + l1: ChainGenesisInfo { + number: 17481768, + hash: hash( + "0x5c13d307623a926cd31415036c8b7fa14572f9dac64528e857a470511fc30771", + ), + parent_hash: H256::zero(), + }, + l2: ChainGenesisInfo { + hash: hash( + "0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd", + ), + number: 0, + parent_hash: H256::zero(), + }, + l2_time: 1686789347, + system_config: SystemConfig { + batcher_addr: addr("0x5050f69a9786f081509234f1a7f4684b5e5b76c9"), + gas_limit: 30000000, + overhead: U256::from(188), + scalar: U256::from(684000), + unsafe_block_signer: addr("0xAf6E19BE0F9cE7f8afd49a1824851023A8249e8a"), + }, }, - batch_inbox: addr("0xff00000000000000000000000000000000008453"), - deposit_contract: addr("0x49048044d57e1c92a77f79988d21fa8faf74e97e"), - system_config_contract: addr("0x73a79fab69143498ed3712e519a88a918e1f4072"), + batch_inbox_address: addr("0xff00000000000000000000000000000000008453"), + deposit_contract_address: addr("0x49048044d57e1c92a77f79988d21fa8faf74e97e"), + l1_system_config_address: addr("0x73a79fab69143498ed3712e519a88a918e1f4072"), l2_to_l1_message_passer: addr("0x4200000000000000000000000000000000000016"), max_channel_size: 100_000_000, channel_timeout: 
300, seq_window_size: 3600, - max_seq_drift: 600, - blocktime: 2, + max_sequencer_drift: 600, + block_time: 2, regolith_time: 0, } } @@ -371,34 +611,57 @@ impl ChainConfig { network: "base-goerli".to_string(), l1_chain_id: 5, l2_chain_id: 84531, - l1_start_epoch: Epoch { - number: 8410981, - hash: hash("0x73d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d19"), - timestamp: 1675193616, - }, - l2_genesis: BlockInfo { - hash: hash("0xa3ab140f15ea7f7443a4702da64c10314eb04d488e72974e02e2d728096b4f76"), - number: 0, - parent_hash: H256::zero(), - timestamp: 1675193616, - }, - system_config: SystemConfig { - batch_sender: addr("0x2d679b567db6187c0c8323fa982cfb88b74dbcc7"), - gas_limit: U256::from(25_000_000), - l1_fee_overhead: U256::from(2100), - l1_fee_scalar: U256::from(1000000), - unsafe_block_signer: addr("0x32a4e99A72c11E9DD3dC159909a2D7BD86C1Bc51"), + genesis: GenesisInfo { + l1: ChainGenesisInfo { + number: 8410981, + hash: hash( + "0x73d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d19", + ), + parent_hash: H256::zero(), + }, + l2: ChainGenesisInfo { + hash: hash( + "0xa3ab140f15ea7f7443a4702da64c10314eb04d488e72974e02e2d728096b4f76", + ), + number: 0, + parent_hash: H256::zero(), + }, + l2_time: 1675193616, + system_config: SystemConfig { + batcher_addr: addr("0x2d679b567db6187c0c8323fa982cfb88b74dbcc7"), + gas_limit: 25_000_000, + overhead: U256::from(2100), + scalar: U256::from(1000000), + unsafe_block_signer: addr("0x32a4e99A72c11E9DD3dC159909a2D7BD86C1Bc51"), + }, }, - system_config_contract: addr("0xb15eea247ece011c68a614e4a77ad648ff495bc1"), - batch_inbox: addr("0x8453100000000000000000000000000000000000"), - deposit_contract: addr("0xe93c8cd0d409341205a592f8c4ac1a5fe5585cfa"), + l1_system_config_address: addr("0xb15eea247ece011c68a614e4a77ad648ff495bc1"), + batch_inbox_address: addr("0x8453100000000000000000000000000000000000"), + deposit_contract_address: addr("0xe93c8cd0d409341205a592f8c4ac1a5fe5585cfa"), l2_to_l1_message_passer: addr("0x4200000000000000000000000000000000000016"), max_channel_size: 100_000_000, channel_timeout: 100, seq_window_size: 3600, - max_seq_drift: 600, + max_sequencer_drift: 600, regolith_time: 1683219600, - blocktime: 2, + block_time: 2, + } + } + + pub fn l1_start_epoch(&self) -> Epoch { + Epoch { + number: self.genesis.l1.number, + hash: self.genesis.l1.hash, + timestamp: self.genesis.l2_time, + } + } + + pub fn l2_genesis(&self) -> BlockInfo { + BlockInfo { + hash: self.genesis.l2.hash, + number: self.genesis.l2.number, + parent_hash: self.genesis.l2.parent_hash, + timestamp: self.genesis.l2_time, } } } @@ -421,229 +684,48 @@ fn hash(s: &str) -> H256 { H256::from_str(s).unwrap() } -fn default_blocktime() -> u64 { - 2 -} - -/// External chain config -/// -/// This is used to parse external chain configs from JSON. -/// This interface corresponds to the default output of the `op-node` -/// genesis devnet setup command `--outfile.rollup` flag. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExternalChainConfig { - genesis: ExternalGenesisInfo, - block_time: u64, - max_sequencer_drift: u64, - seq_window_size: u64, - channel_timeout: u64, - l1_chain_id: u64, - l2_chain_id: u64, - regolith_time: u64, - batch_inbox_address: Address, - deposit_contract_address: Address, - l1_system_config_address: Address, +fn default_network() -> String { + "external".to_owned() } -#[derive(Debug, Clone, Serialize, Deserialize)] -struct ExternalGenesisInfo { - l1: ChainGenesisInfo, - l2: ChainGenesisInfo, - l2_time: u64, - system_config: SystemConfigInfo, +fn default_block_time() -> u64 { + 2 } -#[derive(Debug, Clone, Serialize, Deserialize)] -struct SystemConfigInfo { - #[serde(rename = "batcherAddr")] - batcher_addr: Address, - overhead: H256, - scalar: H256, - #[serde(rename = "gasLimit")] - gas_limit: u64, +fn default_max_channel_size() -> u64 { + 100_000_000 } -#[derive(Debug, Clone, Serialize, Deserialize)] -struct ChainGenesisInfo { - hash: H256, - number: u64, +fn default_l2_to_l1_message_passer() -> Address { + addr("0x4200000000000000000000000000000000000016") } -impl From for ChainConfig { - fn from(external: ExternalChainConfig) -> Self { - Self { - network: "external".to_string(), - l1_chain_id: external.l1_chain_id, - l2_chain_id: external.l2_chain_id, - l1_start_epoch: Epoch { - hash: external.genesis.l1.hash, - number: external.genesis.l1.number, - timestamp: 0, - }, - l2_genesis: BlockInfo { - hash: external.genesis.l2.hash, - number: external.genesis.l2.number, - parent_hash: H256::zero(), - timestamp: external.genesis.l2_time, - }, - system_config: SystemConfig { - batch_sender: external.genesis.system_config.batcher_addr, - gas_limit: U256::from(external.genesis.system_config.gas_limit), - l1_fee_overhead: external.genesis.system_config.overhead.0.into(), - l1_fee_scalar: external.genesis.system_config.scalar.0.into(), - unsafe_block_signer: Address::zero(), - }, - batch_inbox: external.batch_inbox_address, - deposit_contract: external.deposit_contract_address, - system_config_contract: external.l1_system_config_address, - max_channel_size: 100_000_000, - channel_timeout: external.channel_timeout, - seq_window_size: external.seq_window_size, - max_seq_drift: external.max_sequencer_drift, - regolith_time: external.regolith_time, - blocktime: external.block_time, - l2_to_l1_message_passer: addr("0x4200000000000000000000000000000000000016"), - } - } +/// Genesis configuration +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct GenesisInfo { + /// L1 genesis configuration + pub l1: ChainGenesisInfo, + /// L2 genesis configuration + pub l2: ChainGenesisInfo, + /// L2 genesis timestamp + pub l2_time: u64, + /// The initial system config value + pub system_config: SystemConfig, } -impl From for ExternalChainConfig { - fn from(chain_config: ChainConfig) -> Self { - let mut overhead = [0; 32]; - let mut scalar = [0; 32]; - - chain_config - .system_config - .l1_fee_overhead - .to_big_endian(&mut overhead); - chain_config - .system_config - .l1_fee_scalar - .to_big_endian(&mut scalar); - - Self { - genesis: ExternalGenesisInfo { - l1: ChainGenesisInfo { - hash: chain_config.l1_start_epoch.hash, - number: chain_config.l1_start_epoch.number, - }, - l2: ChainGenesisInfo { - hash: chain_config.l2_genesis.hash, - number: chain_config.l2_genesis.number, - }, - l2_time: chain_config.l2_genesis.timestamp, - system_config: SystemConfigInfo { - batcher_addr: chain_config.system_config.batch_sender, - overhead: 
H256::from_slice(&overhead), - scalar: H256::from_slice(&scalar), - gas_limit: chain_config.system_config.gas_limit.as_u64(), - }, - }, - block_time: chain_config.blocktime, - max_sequencer_drift: chain_config.max_seq_drift, - seq_window_size: chain_config.seq_window_size, - channel_timeout: chain_config.channel_timeout, - l1_chain_id: chain_config.l1_chain_id, - l2_chain_id: chain_config.l2_chain_id, - regolith_time: chain_config.regolith_time, - batch_inbox_address: chain_config.batch_inbox, - deposit_contract_address: chain_config.deposit_contract, - l1_system_config_address: chain_config.system_config_contract, - } - } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ChainGenesisInfo { + pub hash: H256, + pub number: u64, + #[serde(default)] + pub parent_hash: H256, } #[cfg(test)] mod test { use super::*; - #[test] - fn test_chain_config_to_external_chain_config() { - let chain_config = ChainConfig::optimism(); - let external_config: ExternalChainConfig = chain_config.clone().into(); - - assert_eq!( - external_config.max_sequencer_drift, - chain_config.max_seq_drift - ); - assert_eq!( - external_config.seq_window_size, - chain_config.seq_window_size - ); - assert_eq!( - external_config.channel_timeout, - chain_config.channel_timeout - ); - assert_eq!(external_config.l1_chain_id, chain_config.l1_chain_id); - assert_eq!(external_config.l2_chain_id, chain_config.l2_chain_id); - assert_eq!(external_config.block_time, chain_config.blocktime); - assert_eq!(external_config.regolith_time, chain_config.regolith_time); - assert_eq!( - external_config.batch_inbox_address, - chain_config.batch_inbox - ); - assert_eq!( - external_config.deposit_contract_address, - chain_config.deposit_contract - ); - assert_eq!( - external_config.l1_system_config_address, - chain_config.system_config_contract - ); - - assert_eq!( - external_config.genesis.l1.hash, - chain_config.l1_start_epoch.hash - ); - assert_eq!( - external_config.genesis.l1.number, - chain_config.l1_start_epoch.number - ); - assert_eq!( - external_config.genesis.l2.hash, - chain_config.l2_genesis.hash - ); - assert_eq!( - external_config.genesis.l2.number, - chain_config.l2_genesis.number - ); - assert_eq!( - external_config.genesis.l2_time, - chain_config.l2_genesis.timestamp - ); - - assert_eq!( - external_config.genesis.system_config.batcher_addr, - chain_config.system_config.batch_sender - ); - - let mut overhead = [0; 32]; - let mut scalar = [0; 32]; - - chain_config - .system_config - .l1_fee_overhead - .to_big_endian(&mut overhead); - chain_config - .system_config - .l1_fee_scalar - .to_big_endian(&mut scalar); - - assert_eq!( - external_config.genesis.system_config.overhead, - H256::from_slice(&overhead), - ); - assert_eq!( - external_config.genesis.system_config.scalar, - H256::from_slice(&scalar), - ); - - assert_eq!( - external_config.genesis.system_config.gas_limit, - chain_config.system_config.gas_limit.as_u64() - ); - } - #[test] fn test_read_external_chain_from_json() { let devnet_json = r#" @@ -678,50 +760,216 @@ mod test { } "#; - let external: ExternalChainConfig = serde_json::from_str(devnet_json).unwrap(); - let chain: ChainConfig = external.into(); + let chain: ChainConfig = serde_json::from_str(devnet_json).unwrap(); assert_eq!(chain.network, "external"); assert_eq!(chain.l1_chain_id, 900); assert_eq!(chain.l2_chain_id, 901); - assert_eq!(chain.l1_start_epoch.number, 0); + assert_eq!(chain.genesis.l1.number, 0); assert_eq!( - chain.l1_start_epoch.hash, + 
chain.genesis.l1.hash, hash("0xdb52a58e7341447d1a9525d248ea07dbca7dfa0e105721dee1aa5a86163c088d") ); - assert_eq!(chain.l2_genesis.number, 0); + assert_eq!(chain.genesis.l2.number, 0); assert_eq!( - chain.l2_genesis.hash, + chain.genesis.l2.hash, hash("0xf85bca315a08237644b06a8350cda3bc0de1593745a91be93daeadb28fb3a32e") ); - assert_eq!(chain.system_config.gas_limit, U256::from(30_000_000)); - assert_eq!(chain.system_config.l1_fee_overhead, U256::from(2100)); - assert_eq!(chain.system_config.l1_fee_scalar, U256::from(1_000_000)); + assert_eq!(chain.genesis.system_config.gas_limit, 30_000_000); + assert_eq!(chain.genesis.system_config.overhead, U256::from(2100)); + assert_eq!(chain.genesis.system_config.scalar, U256::from(1_000_000)); assert_eq!( - chain.system_config.batch_sender, + chain.genesis.system_config.batcher_addr, addr("0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc") ); assert_eq!( - chain.batch_inbox, + chain.batch_inbox_address, addr("0xff00000000000000000000000000000000000000") ); assert_eq!( - chain.deposit_contract, + chain.deposit_contract_address, addr("0x6900000000000000000000000000000000000001") ); assert_eq!( - chain.system_config_contract, + chain.l1_system_config_address, addr("0x6900000000000000000000000000000000000009") ); assert_eq!(chain.max_channel_size, 100_000_000); assert_eq!(chain.channel_timeout, 120); assert_eq!(chain.seq_window_size, 200); - assert_eq!(chain.max_seq_drift, 300); + assert_eq!(chain.max_sequencer_drift, 300); assert_eq!(chain.regolith_time, 0); - assert_eq!(chain.blocktime, 2); + assert_eq!(chain.block_time, 2); + } + + #[test] + fn test_write_chain_config_to_json() -> Result<()> { + let chain = ChainConfig::optimism(); + let json = serde_json::to_string(&chain)?; + + let expected_json: String = r#"{ + "network": "optimism", + "l1_chain_id": 1, + "l2_chain_id": 10, + "genesis": { + "l1": { + "hash": "0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108", + "number": 17422590, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l2": { + "hash": "0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3", + "number": 105235063, + "parentHash": "0x21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50" + }, + "l2_time": 1686068903, + "system_config": { + "batcherAddr": "0x6887246668a3b87f54deb3b94ba47a6f63f32985", + "gasLimit": 30000000, + "overhead": "0x00000000000000000000000000000000000000000000000000000000000000bc", + "scalar": "0x00000000000000000000000000000000000000000000000000000000000a6fe0", + "unsafeBlockSigner": "0xaaaa45d9549eda09e70937013520214382ffc4a2" + } + }, + "block_time": 2, + "max_sequencer_drift": 600, + "seq_window_size": 3600, + "max_channel_size": 100000000, + "channel_timeout": 300, + "regolith_time": 0, + "batch_inbox_address": "0xff00000000000000000000000000000000000010", + "deposit_contract_address": "0xbeb5fc579115071764c7423a4f12edde41f106ed", + "l1_system_config_address": "0x229047fed2591dbec1ef1118d64f7af3db9eb290", + "l2_to_l1_message_passer": "0x4200000000000000000000000000000000000016" + }"# + .chars() + .filter(|c| !c.is_whitespace()) + .collect(); + + assert_eq!(json, expected_json); + + Ok(()) + } + + #[test] + fn test_config_save_load() -> Result<()> { + // Fill by any data different from default values: + let config = Config { + l1_rpc_url: "http://localhost:8888".to_string(), + l2_rpc_url: "http://127.0.0.1:9999".to_string(), + l2_engine_url: "http://localhost:5545".to_string(), + chain: ChainConfig::optimism_sepolia(), + jwt_secret: 
"TestApiKey".to_owned(), + checkpoint_sync_url: Some("http://10.0.0.1:5432".to_string()), + rpc_port: 123, + devnet: true, + sequencer: Some(SequencerConfig(54321)), + p2p_listen: "10.0.0.1:4444".parse()?, + p2p_secret_key: Some(SecretKey::generate()), + p2p_bootnodes: Some(vec![]), + p2p_sequencer_secret_key: Some(SecretKey::generate()), + }; + + let tempfile = tempfile::NamedTempFile::new()?; + + config.save(tempfile.path())?; + + let config_read = ConfigBuilder::default().toml(tempfile.path()).build(); + assert_eq!( - chain.l2_to_l1_message_passer, - addr("0x4200000000000000000000000000000000000016") + serde_json::to_string(&config_read)?, + serde_json::to_string(&config)?, + "`config_read` doesn't match to `config`" ); + + Ok(()) + } + + #[test] + fn test_cli_config_apply_to_toml() -> Result<()> { + // Just test data, which are different from defaults: + const TOML: &str = r#" + l1_rpc_url = "http://localhost:8545" + l2_rpc_url = "http://localhost:9545" + l2_engine_url = "http://localhost:8551" + jwt_secret = "TestApiKey" + rpc_port = 7546 + devnet = true + p2p_listen = "127.0.0.1:9876" + + [chain] + network = "optimism_goerli" + l1_chain_id = 11 + l2_chain_id = 12 + block_time = 2 + max_sequencer_drift = 600 + seq_window_size = 3600 + max_channel_size = 100000000 + channel_timeout = 300 + regolith_time = 0 + batch_inbox_address = "0xff00000000000000000000000000000000000010" + deposit_contract_address = "0xbeb5fc579115071764c7423a4f12edde41f106ed" + l1_system_config_address = "0x229047fed2591dbec1ef1118d64f7af3db9eb290" + l2_to_l1_message_passer = "0x4200000000000000000000000000000000000016" + + [chain.genesis] + l2_time = 1686068905 + + [chain.genesis.l1] + hash = "0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108" + number = 17422590 + parentHash = "0x0000000000000000000000000000000000000000000000000000000000000000" + + [chain.genesis.l2] + hash = "0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3" + number = 105235063 + parentHash = "0x21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50" + + [chain.genesis.system_config] + batcherAddr = "0x6887246668a3b87f54deb3b94ba47a6f63f32985" + gasLimit = 30000000 + overhead = "0xbc" + scalar = "0xa6fe0" + unsafeBlockSigner = "0xaaaa45d9549eda09e70937013520214382ffc4a2" + "#; + + let cli_config = CliConfig { + l1_rpc_url: Some("http://10.0.0.1:3344".to_string()), + l2_rpc_url: Some("http://10.0.0.1:4455".to_string()), + l2_engine_url: None, + jwt_secret: Some("new secret".to_string()), + checkpoint_sync_url: None, + devnet: false, + sequencer: None, + rpc_port: Some(5555), + p2p_secret_key: None, + p2p_listen: *network::LISTENING, + p2p_bootnodes: None, + p2p_sequencer_secret_key: None, + }; + + let mut config_expected = ConfigBuilder::default() + .toml_internal(Toml::string(TOML)) + .build(); + + // Copy only values, overridden by `CliConfig`: + config_expected.l1_rpc_url = cli_config.l1_rpc_url.as_ref().unwrap().clone(); + config_expected.l2_rpc_url = cli_config.l2_rpc_url.as_ref().unwrap().clone(); + config_expected.jwt_secret = cli_config.jwt_secret.as_ref().unwrap().clone(); + config_expected.rpc_port = cli_config.rpc_port.unwrap(); + config_expected.p2p_listen = cli_config.p2p_listen; + + let config = ConfigBuilder::default() + .toml_internal(Toml::string(TOML)) + .cli(cli_config) + .build(); + + assert_eq!( + serde_json::to_string(&config)?, + serde_json::to_string(&config_expected)?, + "`config` doesn't match to `config_expected`" + ); + + Ok(()) } } diff --git a/src/derive/mod.rs 
b/src/derive/mod.rs index 3f1ef0bc..c963fab6 100644 --- a/src/derive/mod.rs +++ b/src/derive/mod.rs @@ -2,7 +2,7 @@ use std::sync::{mpsc, Arc, RwLock}; use eyre::Result; -use crate::{config::Config, engine::PayloadAttributes}; +use crate::{config::ChainConfig, engine::PayloadAttributes, l1::L1Info, types::common::Epoch}; use self::{ stages::{ @@ -39,12 +39,33 @@ impl Iterator for Pipeline { } impl Pipeline { - pub fn new(state: Arc>, config: Arc, seq: u64) -> Result { + pub fn new( + state: Arc>, + chain: &ChainConfig, + seq: u64, + unsafe_seq: u64, + ) -> Result { let (tx, rx) = mpsc::channel(); let batcher_transactions = BatcherTransactions::new(rx); - let channels = Channels::new(batcher_transactions, config.clone()); - let batches = Batches::new(channels, state.clone(), config.clone()); - let attributes = Attributes::new(Box::new(batches), state, config, seq); + let channels = Channels::new( + batcher_transactions, + chain.max_channel_size, + chain.channel_timeout, + ); + let batches = Batches::new( + channels, + state.clone(), + chain.seq_window_size, + chain.max_sequencer_drift, + chain.block_time, + ); + let attributes = Attributes::new( + Box::new(batches), + state, + chain.regolith_time, + seq, + unsafe_seq, + ); Ok(Self { batcher_transaction_sender: tx, @@ -59,6 +80,16 @@ impl Pipeline { Ok(()) } + pub fn derive_attributes_for_epoch( + &mut self, + epoch: Epoch, + l1_info: &L1Info, + block_timestamp: u64, + ) -> PayloadAttributes { + self.attributes + .derive_attributes_for_epoch(epoch, l1_info, block_timestamp) + } + pub fn peek(&mut self) -> Option<&PayloadAttributes> { if self.pending_attributes.is_none() { let next_attributes = self.next(); @@ -88,69 +119,75 @@ mod tests { }; use crate::{ - common::RawTransaction, config::{ChainConfig, Config}, derive::*, l1::{BlockUpdate, ChainWatcher}, + types::attributes::RawTransaction, }; #[tokio::test(flavor = "multi_thread")] async fn test_attributes_match() { - if std::env::var("L1_TEST_RPC_URL").is_ok() && std::env::var("L2_TEST_RPC_URL").is_ok() { - let rpc = env::var("L1_TEST_RPC_URL").unwrap(); - let l2_rpc = env::var("L2_TEST_RPC_URL").unwrap(); - - let config = Arc::new(Config { - l1_rpc_url: rpc.to_string(), - l2_rpc_url: l2_rpc.to_string(), - chain: ChainConfig::optimism_goerli(), - l2_engine_url: String::new(), - jwt_secret: String::new(), - checkpoint_sync_url: None, - rpc_port: 9545, - devnet: false, - }); - - let mut chain_watcher = ChainWatcher::new( - config.chain.l1_start_epoch.number, - config.chain.l2_genesis.number, - config.clone(), + let rpc_env = std::env::var("L1_TEST_RPC_URL"); + let l2_rpc_env = std::env::var("L2_TEST_RPC_URL"); + let (rpc, l2_rpc) = match (rpc_env, l2_rpc_env) { + (Ok(rpc), Ok(l2_rpc)) => (rpc, l2_rpc), + (rpc_env, l2_rpc_env) => { + eprintln!("Test ignored: `test_attributes_match`, rpc: {rpc_env:?}, l2_rpc: {l2_rpc_env:?}"); + return; + } + }; + + let config = Arc::new(Config { + chain: ChainConfig::optimism_goerli(), + l1_rpc_url: rpc, + l2_rpc_url: l2_rpc, + rpc_port: 9545, + ..Config::default() + }); + + let mut chain_watcher = ChainWatcher::new( + config.chain.genesis.l1.number, + config.chain.genesis.l2.number, + config.clone(), + ) + .unwrap(); + + chain_watcher.start().unwrap(); + + let state = Arc::new(RwLock::new(State::new( + config.chain.l2_genesis(), + config.chain.l1_start_epoch(), + config.chain.l2_genesis(), + config.chain.l1_start_epoch(), + config.chain.seq_window_size, + ))); + + let mut pipeline = Pipeline::new(state.clone(), &config.chain, 0, 0).unwrap(); + + 
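// Note on the new constructor signature introduced by this patch: `Pipeline::new` now takes
// the chain config by reference plus two sequence numbers (safe and unsafe). In the driver
// they are seeded from head info, e.g.
// `Pipeline::new(state.clone(), &config.chain, finalized_info.seq_number, unsafe_info.seq_number)`;
// this test passes `0, 0` because it starts from the genesis heads.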
chain_watcher.recv_from_channel().await.unwrap(); + chain_watcher.recv_from_channel().await.unwrap(); + chain_watcher.recv_from_channel().await.unwrap(); + let update = chain_watcher.recv_from_channel().await.unwrap(); + + let l1_info = match update { + BlockUpdate::NewBlock(block) => *block, + _ => panic!("wrong update type"), + }; + + pipeline + .push_batcher_transactions( + l1_info.batcher_transactions.clone(), + l1_info.block_info.number, ) .unwrap(); - chain_watcher.start().unwrap(); - - let state = Arc::new(RwLock::new(State::new( - config.chain.l2_genesis, - config.chain.l1_start_epoch, - config.clone(), - ))); - - let mut pipeline = Pipeline::new(state.clone(), config.clone(), 0).unwrap(); + state.write().unwrap().update_l1_info(l1_info); - chain_watcher.recv_from_channel().await.unwrap(); - let update = chain_watcher.recv_from_channel().await.unwrap(); + if let Some(payload) = pipeline.next() { + let hashes = get_tx_hashes(&payload.transactions.unwrap()); + let expected_hashes = get_expected_hashes(config.chain.genesis.l2.number + 1).await; - let l1_info = match update { - BlockUpdate::NewBlock(block) => *block, - _ => panic!("wrong update type"), - }; - - pipeline - .push_batcher_transactions( - l1_info.batcher_transactions.clone(), - l1_info.block_info.number, - ) - .unwrap(); - - state.write().unwrap().update_l1_info(l1_info); - - if let Some(payload) = pipeline.next() { - let hashes = get_tx_hashes(&payload.transactions.unwrap()); - let expected_hashes = get_expected_hashes(config.chain.l2_genesis.number + 1).await; - - assert_eq!(hashes, expected_hashes); - } + assert_eq!(hashes, expected_hashes); } } diff --git a/src/derive/stages/attributes.rs b/src/derive/stages/attributes.rs index 257bbe6f..f5172699 100644 --- a/src/derive/stages/attributes.rs +++ b/src/derive/stages/attributes.rs @@ -1,26 +1,25 @@ use std::sync::{Arc, RwLock}; -use ethers::abi::{decode, encode, ParamType, Token}; -use ethers::types::{Address, Log, H256, U256, U64}; -use ethers::utils::{keccak256, rlp::Encodable, rlp::RlpStream}; +use ethers::types::{H256, U64}; +use ethers::utils::rlp::Encodable; -use eyre::Result; - -use crate::common::{Epoch, RawTransaction}; -use crate::config::{Config, SystemAccounts}; +use crate::config::SystemAccounts; use crate::derive::state::State; use crate::derive::PurgeableIterator; use crate::engine::PayloadAttributes; use crate::l1::L1Info; +use crate::types::attributes::{AttributesDeposited, DepositedTransaction}; +use crate::types::{attributes::RawTransaction, common::Epoch}; use super::batches::Batch; pub struct Attributes { batch_iter: Box>, state: Arc>, - sequence_number: u64, + seq_num: u64, epoch_hash: H256, - config: Arc, + regolith_time: u64, + unsafe_seq_num: u64, } impl Iterator for Attributes { @@ -36,7 +35,7 @@ impl Iterator for Attributes { impl PurgeableIterator for Attributes { fn purge(&mut self) { self.batch_iter.purge(); - self.sequence_number = 0; + self.seq_num = 0; self.epoch_hash = self.state.read().unwrap().safe_epoch.hash; } } @@ -45,17 +44,60 @@ impl Attributes { pub fn new( batch_iter: Box>, state: Arc>, - config: Arc, - seq: u64, + regolith_time: u64, + seq_num: u64, + unsafe_seq_num: u64, ) -> Self { - let epoch_hash = state.read().unwrap().safe_epoch.hash; + let epoch_hash = state.read().expect("lock poisoned").safe_epoch.hash; Self { batch_iter, state, - sequence_number: seq, + seq_num, epoch_hash, - config, + regolith_time, + unsafe_seq_num, + } + } + + pub fn derive_attributes_for_epoch( + &mut self, + epoch: Epoch, + l1_info: &L1Info, + 
block_timestamp: u64, + ) -> PayloadAttributes { + let unsafe_epoch = self.state.read().expect("lock poisoned").unsafe_epoch; + + if unsafe_epoch.hash != epoch.hash { + self.unsafe_seq_num = 0; + } else { + self.unsafe_seq_num += 1; + } + + let timestamp = U64([block_timestamp]); + let l1_inclusion_block = Some(epoch.number); + let seq_number = Some(self.unsafe_seq_num); + let prev_randao = l1_info.block_info.mix_hash; + + let transactions = Some(self.derive_transactions( + block_timestamp, + vec![], + l1_info, + epoch.hash, + self.unsafe_seq_num, + )); + let suggested_fee_recipient = SystemAccounts::default().fee_vault; + + PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient, + transactions, + no_tx_pool: false, + gas_limit: U64([l1_info.system_config.gas_limit]), + epoch: Some(epoch), + l1_inclusion_block, + seq_number, } } @@ -76,9 +118,16 @@ impl Attributes { let timestamp = U64([batch.timestamp]); let l1_inclusion_block = Some(batch.l1_inclusion_block); - let seq_number = Some(self.sequence_number); + let seq_number = Some(self.seq_num); let prev_randao = l1_info.block_info.mix_hash; - let transactions = Some(self.derive_transactions(batch, l1_info)); + + let transactions = Some(self.derive_transactions( + batch.timestamp, + batch.transactions, + l1_info, + self.epoch_hash, + self.seq_num, + )); let suggested_fee_recipient = SystemAccounts::default().fee_vault; PayloadAttributes { @@ -86,26 +135,33 @@ impl Attributes { prev_randao, suggested_fee_recipient, transactions, - no_tx_pool: true, - gas_limit: U64([l1_info.system_config.gas_limit.as_u64()]), + no_tx_pool: false, + gas_limit: U64([l1_info.system_config.gas_limit]), epoch, l1_inclusion_block, seq_number, } } - fn derive_transactions(&self, batch: Batch, l1_info: &L1Info) -> Vec { + fn derive_transactions( + &self, + timestamp: u64, + batch_txs: Vec, + l1_info: &L1Info, + epoch_hash: H256, + seq: u64, + ) -> Vec { let mut transactions = Vec::new(); - let attributes_tx = self.derive_attributes_deposited(l1_info, batch.timestamp); + let attributes_tx = self.derive_attributes_deposited(l1_info, timestamp, seq); transactions.push(attributes_tx); - if self.sequence_number == 0 { - let mut user_deposited_txs = self.derive_user_deposited(); + if seq == 0 { + let mut user_deposited_txs = self.derive_user_deposited(epoch_hash); transactions.append(&mut user_deposited_txs); } - let mut rest = batch.transactions; + let mut rest = batch_txs; transactions.append(&mut rest); transactions @@ -115,18 +171,18 @@ impl Attributes { &self, l1_info: &L1Info, batch_timestamp: u64, + seq: u64, ) -> RawTransaction { - let seq = self.sequence_number; let attributes_deposited = - AttributesDeposited::from_block_info(l1_info, seq, batch_timestamp, &self.config); + AttributesDeposited::from_block_info(l1_info, seq, batch_timestamp, self.regolith_time); let attributes_tx = DepositedTransaction::from(attributes_deposited); RawTransaction(attributes_tx.rlp_bytes().to_vec()) } - fn derive_user_deposited(&self) -> Vec { - let state = self.state.read().unwrap(); + fn derive_user_deposited(&self, epoch_hash: H256) -> Vec { + let state = self.state.read().expect("lock poisoned"); state - .l1_info_by_hash(self.epoch_hash) + .l1_info_by_hash(epoch_hash) .map(|info| &info.user_deposits) .map(|deposits| { deposits @@ -142,208 +198,281 @@ impl Attributes { fn update_sequence_number(&mut self, batch_epoch_hash: H256) { if self.epoch_hash != batch_epoch_hash { - self.sequence_number = 0; + self.seq_num = 0; } else { - self.sequence_number += 1; + 
self.seq_num += 1;
         }
 
         self.epoch_hash = batch_epoch_hash;
     }
 }
 
-#[derive(Debug)]
-struct DepositedTransaction {
-    source_hash: H256,
-    from: Address,
-    to: Option<Address>
, - mint: U256, - value: U256, - gas: u64, - is_system_tx: bool, - data: Vec, -} - -impl From for DepositedTransaction { - fn from(attributes_deposited: AttributesDeposited) -> Self { - let hash = attributes_deposited.hash.to_fixed_bytes(); - let seq = H256::from_low_u64_be(attributes_deposited.sequence_number).to_fixed_bytes(); - let h = keccak256([hash, seq].concat()); - - let domain = H256::from_low_u64_be(1).to_fixed_bytes(); - let source_hash = H256::from_slice(&keccak256([domain, h].concat())); - - let system_accounts = SystemAccounts::default(); - let from = system_accounts.attributes_depositor; - let to = Some(system_accounts.attributes_predeploy); - - let data = attributes_deposited.encode(); - - Self { - source_hash, - from, - to, - mint: U256::zero(), - value: U256::zero(), - gas: attributes_deposited.gas, - is_system_tx: attributes_deposited.is_system_tx, - data, - } - } -} - -impl From for DepositedTransaction { - fn from(user_deposited: UserDeposited) -> Self { - let hash = user_deposited.l1_block_hash.to_fixed_bytes(); - let log_index = user_deposited.log_index.into(); - let h = keccak256([hash, log_index].concat()); - - let domain = H256::from_low_u64_be(0).to_fixed_bytes(); - let source_hash = H256::from_slice(&keccak256([domain, h].concat())); - - let to = if user_deposited.is_creation { - None - } else { - Some(user_deposited.to) +#[cfg(test)] +mod tests { + use crate::config::{ChainConfig, SystemAccounts}; + use crate::derive::stages::attributes::Attributes; + use crate::derive::stages::batcher_transactions::BatcherTransactions; + use crate::derive::stages::batches::Batches; + use crate::derive::stages::channels::Channels; + use crate::derive::state::State; + use crate::l1::{L1BlockInfo, L1Info}; + use crate::types::attributes::{DepositedTransaction, UserDeposited}; + use crate::types::common::{BlockInfo, Epoch}; + use ethers::types::{Address, H256, U256, U64}; + use ethers::utils::rlp::Rlp; + use std::sync::{mpsc, Arc, RwLock}; + use std::time::{SystemTime, UNIX_EPOCH}; + + #[test] + fn test_derive_attributes_for_epoch_same_epoch() { + // Let's say we just started, the unsafe/safe/finalized heads are same. + // New block would be generated in the same epoch. + // Prepare required state. 
+ let chain = ChainConfig::optimism_goerli(); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + let block_info = BlockInfo { + hash: H256::random(), + number: 0, + parent_hash: H256::random(), + timestamp: now, + }; + let epoch = Epoch { + number: 0, + hash: H256::random(), + timestamp: now, }; - Self { - source_hash, - from: user_deposited.from, - to, - mint: user_deposited.mint, - value: user_deposited.value, - gas: user_deposited.gas, - is_system_tx: false, - data: user_deposited.data, - } - } -} - -impl Encodable for DepositedTransaction { - fn rlp_append(&self, s: &mut RlpStream) { - s.append_raw(&[0x7E], 1); - s.begin_list(8); - s.append(&self.source_hash); - s.append(&self.from); - - if let Some(to) = self.to { - s.append(&to); - } else { - s.append(&""); - } + let l1_info = L1Info { + block_info: L1BlockInfo { + number: epoch.number, + hash: epoch.hash, + timestamp: epoch.timestamp, + parent_hash: H256::zero(), + base_fee: U256::zero(), + mix_hash: H256::zero(), + }, + system_config: chain.genesis.system_config, + user_deposits: vec![], + batcher_transactions: vec![], + finalized: false, + }; - s.append(&self.mint); - s.append(&self.value); - s.append(&self.gas); - s.append(&self.is_system_tx); - s.append(&self.data); + let state = Arc::new(RwLock::new(State::new( + block_info, + epoch, + block_info, + epoch, + chain.seq_window_size, + ))); + + state.write().unwrap().update_l1_info(l1_info.clone()); + + let (_tx, rx) = mpsc::channel(); + let batcher_transactions = BatcherTransactions::new(rx); + let channels = Channels::new( + batcher_transactions, + chain.max_channel_size, + chain.channel_timeout, + ); + let batches = Batches::new( + channels, + state.clone(), + chain.seq_window_size, + chain.max_sequencer_drift, + chain.block_time, + ); + + let mut attributes = Attributes::new(Box::new(batches), state, chain.regolith_time, 0, 0); + let attrs = attributes.derive_attributes_for_epoch(epoch, &l1_info, now + 2); + + // Check fields. + assert_eq!(attrs.timestamp, (now + 2).into(), "timestamp doesn't match"); + assert_eq!( + attrs.prev_randao, l1_info.block_info.mix_hash, + "prev rando doesn't match" + ); + assert_eq!( + attrs.suggested_fee_recipient, + SystemAccounts::default().fee_vault, + "fee recipient doesn't match" + ); + assert!(!attrs.no_tx_pool, "no tx pool doesn't match"); + assert_eq!( + attrs.gas_limit, + U64([l1_info.system_config.gas_limit]), + "gas limit doesn't match" + ); + assert!(attrs.epoch.is_some(), "epoch missed"); + assert_eq!(attrs.epoch.unwrap(), epoch, "epoch doesn't match"); + assert!( + attrs.l1_inclusion_block.is_some(), + "l1 inclusion block missed" + ); + assert_eq!( + attrs.l1_inclusion_block.unwrap(), + 0, + "wrong l1 inclusion block" + ); + assert!(attrs.seq_number.is_some(), "seq number missed"); + assert_eq!(attrs.seq_number.unwrap(), 1, "wrong sequence number"); + + // Check transactions. 
+ assert!(attrs.transactions.is_some(), "missed transactions"); + let transactions = attrs.transactions.unwrap(); + assert_eq!(transactions.len(), 1, "wrong transactions length"); + + let (deposited_epoch, seq_number) = + transactions.first().unwrap().derive_unsafe_epoch().unwrap(); + assert_eq!( + deposited_epoch, epoch, + "wrong epoch in deposited transaction" + ); + assert_eq!(attrs.seq_number.unwrap(), seq_number); } -} - -#[derive(Debug)] -struct AttributesDeposited { - number: u64, - timestamp: u64, - base_fee: U256, - hash: H256, - sequence_number: u64, - batcher_hash: H256, - fee_overhead: U256, - fee_scalar: U256, - gas: u64, - is_system_tx: bool, -} -impl AttributesDeposited { - fn from_block_info(l1_info: &L1Info, seq: u64, batch_timestamp: u64, config: &Config) -> Self { - let is_regolith = batch_timestamp >= config.chain.regolith_time; - let is_system_tx = !is_regolith; - - let gas = if is_regolith { 1_000_000 } else { 150_000_000 }; + #[test] + fn test_derive_attributes_for_epoch_new_epoch() { + // Now let's say we will generate a payload in a new epoch. + // Must contain deposit transactions. + let chain = ChainConfig::optimism_goerli(); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + let block_info = BlockInfo { + hash: H256::random(), + number: 0, + parent_hash: H256::random(), + timestamp: now, + }; + let epoch = Epoch { + number: 0, + hash: H256::random(), + timestamp: now, + }; - Self { - number: l1_info.block_info.number, - timestamp: l1_info.block_info.timestamp, - base_fee: l1_info.block_info.base_fee, - hash: l1_info.block_info.hash, - sequence_number: seq, - batcher_hash: l1_info.system_config.batcher_hash(), - fee_overhead: l1_info.system_config.l1_fee_overhead, - fee_scalar: l1_info.system_config.l1_fee_scalar, - gas, - is_system_tx, - } - } + let l1_block_num = 1; + let l1_block_hash = H256::random(); - fn encode(&self) -> Vec { - let tokens = vec![ - Token::Uint(self.number.into()), - Token::Uint(self.timestamp.into()), - Token::Uint(self.base_fee), - Token::FixedBytes(self.hash.as_fixed_bytes().to_vec()), - Token::Uint(self.sequence_number.into()), - Token::FixedBytes(self.batcher_hash.as_fixed_bytes().to_vec()), - Token::Uint(self.fee_overhead), - Token::Uint(self.fee_scalar), - ]; - - let selector = hex::decode("015d8eb9").unwrap(); - let data = encode(&tokens); - - [selector, data].concat() - } -} + let new_epoch = Epoch { + number: l1_block_num, + hash: l1_block_hash, + timestamp: now + 2, + }; -#[derive(Debug, Clone)] -pub struct UserDeposited { - pub from: Address, - pub to: Address, - pub mint: U256, - pub value: U256, - pub gas: u64, - pub is_creation: bool, - pub data: Vec, - pub l1_block_num: u64, - pub l1_block_hash: H256, - pub log_index: U256, -} + let user_deposited = UserDeposited { + from: Address::random(), + to: Address::random(), + mint: U256::zero(), + value: U256::from(10), + gas: 10000, + is_creation: false, + data: vec![], + l1_block_num, + l1_block_hash, + log_index: U256::zero(), + }; -impl TryFrom for UserDeposited { - type Error = eyre::Report; + let l1_info = L1Info { + block_info: L1BlockInfo { + number: new_epoch.number, + hash: new_epoch.hash, + timestamp: new_epoch.timestamp, + parent_hash: H256::zero(), + base_fee: U256::zero(), + mix_hash: H256::zero(), + }, + system_config: chain.genesis.system_config, + user_deposits: vec![user_deposited.clone()], + batcher_transactions: vec![], + finalized: false, + }; - fn try_from(log: Log) -> Result { - let opaque_data = 
decode(&[ParamType::Bytes], &log.data)?[0] - .clone() - .into_bytes() + let state = Arc::new(RwLock::new(State::new( + block_info, + epoch, + block_info, + epoch, + chain.seq_window_size, + ))); + + state.write().unwrap().update_l1_info(l1_info.clone()); + + let (_tx, rx) = mpsc::channel(); + let batcher_transactions = BatcherTransactions::new(rx); + let channels = Channels::new( + batcher_transactions, + chain.max_channel_size, + chain.channel_timeout, + ); + let batches = Batches::new( + channels, + state.clone(), + chain.seq_window_size, + chain.max_sequencer_drift, + chain.block_time, + ); + + let mut attributes = Attributes::new(Box::new(batches), state, chain.regolith_time, 0, 0); + let attrs = attributes.derive_attributes_for_epoch(new_epoch, &l1_info, now + 2); + + // Check fields. + assert_eq!(attrs.timestamp, (now + 2).into(), "timestamp doesn't match"); + assert_eq!( + attrs.prev_randao, l1_info.block_info.mix_hash, + "prev rando doesn't match" + ); + assert_eq!( + attrs.suggested_fee_recipient, + SystemAccounts::default().fee_vault, + "fee recipient doesn't match" + ); + assert!(!attrs.no_tx_pool, "no tx pool doesn't match"); + assert_eq!( + attrs.gas_limit, + U64([l1_info.system_config.gas_limit]), + "gas limit doesn't match" + ); + assert!(attrs.epoch.is_some(), "epoch missed"); + assert_eq!(attrs.epoch.unwrap(), new_epoch, "epoch doesn't match"); + assert!( + attrs.l1_inclusion_block.is_some(), + "l1 inclusion block missed" + ); + assert_eq!( + attrs.l1_inclusion_block.unwrap(), + 1, + "wrong l1 inclusion block" + ); + assert!(attrs.seq_number.is_some(), "seq number missed"); + assert_eq!(attrs.seq_number.unwrap(), 0, "wrong sequence number"); + + // Check transactions. + assert!(attrs.transactions.is_some(), "missed transactions"); + let transactions = attrs.transactions.unwrap(); + assert_eq!(transactions.len(), 2, "wrong transactions length"); + + let (deposited_epoch, seq_number) = + transactions.get(0).unwrap().derive_unsafe_epoch().unwrap(); + assert_eq!( + deposited_epoch, new_epoch, + "wrong epoch in deposited transaction" + ); + assert_eq!(attrs.seq_number.unwrap(), seq_number); + + let deposited_tx_raw = transactions.get(1).unwrap(); + let deposited_tx = Rlp::new(&deposited_tx_raw.0) + .as_val::() .unwrap(); - - let from = Address::try_from(log.topics[1])?; - let to = Address::try_from(log.topics[2])?; - let mint = U256::from_big_endian(&opaque_data[0..32]); - let value = U256::from_big_endian(&opaque_data[32..64]); - let gas = u64::from_be_bytes(opaque_data[64..72].try_into()?); - let is_creation = opaque_data[72] != 0; - let data = opaque_data[73..].to_vec(); - - let l1_block_num = log - .block_number - .ok_or(eyre::eyre!("block num not found"))? 
- .as_u64(); - - let l1_block_hash = log.block_hash.ok_or(eyre::eyre!("block hash not found"))?; - let log_index = log.log_index.unwrap(); - - Ok(Self { - from, - to, - mint, - value, - gas, - is_creation, - data, - l1_block_num, - l1_block_hash, - log_index, - }) + let deposited_tx_from = DepositedTransaction::from(user_deposited); + assert_eq!( + deposited_tx, deposited_tx_from, + "transaction with deposit doesn't match" + ); } } diff --git a/src/derive/stages/batches.rs b/src/derive/stages/batches.rs index 03b195ab..1a67e430 100644 --- a/src/derive/stages/batches.rs +++ b/src/derive/stages/batches.rs @@ -10,10 +10,9 @@ use ethers::utils::rlp::{DecoderError, Rlp}; use eyre::Result; use libflate::zlib::Decoder; -use crate::common::RawTransaction; -use crate::config::Config; use crate::derive::state::State; use crate::derive::PurgeableIterator; +use crate::types::attributes::RawTransaction; use super::channels::Channel; @@ -22,7 +21,9 @@ pub struct Batches { batches: BTreeMap, channel_iter: I, state: Arc>, - config: Arc, + seq_window_size: u64, + max_seq_drift: u64, + blocktime: u64, } impl Iterator for Batches @@ -50,12 +51,20 @@ where } impl Batches { - pub fn new(channel_iter: I, state: Arc>, config: Arc) -> Self { + pub fn new( + channel_iter: I, + state: Arc>, + seq_window_size: u64, + max_seq_drift: u64, + blocktime: u64, + ) -> Self { Self { batches: BTreeMap::new(), channel_iter, state, - config, + seq_window_size, + max_seq_drift, + blocktime, } } } @@ -108,11 +117,17 @@ where let safe_head = state.safe_head; let epoch = state.safe_epoch; let next_epoch = state.epoch_by_number(epoch.number + 1); - let seq_window_size = self.config.chain.seq_window_size; + let seq_window_size = self.seq_window_size; if let Some(next_epoch) = next_epoch { if current_l1_block > epoch.number + seq_window_size { - let next_timestamp = safe_head.timestamp + self.config.chain.blocktime; + tracing::warn!( + "create an empty block because of sequence window: {} {}", + current_l1_block, + epoch.number + ); + + let next_timestamp = safe_head.timestamp + self.blocktime; let epoch = if next_timestamp < next_epoch.timestamp { epoch } else { @@ -145,7 +160,7 @@ where let epoch = state.safe_epoch; let next_epoch = state.epoch_by_number(epoch.number + 1); let head = state.safe_head; - let next_timestamp = head.timestamp + self.config.chain.blocktime; + let next_timestamp = head.timestamp + self.blocktime; // check timestamp range match batch.timestamp.cmp(&next_timestamp) { @@ -161,7 +176,7 @@ where } // check the inclusion delay - if batch.epoch_num + self.config.chain.seq_window_size < batch.l1_inclusion_block { + if batch.epoch_num + self.seq_window_size < batch.l1_inclusion_block { tracing::warn!("inclusion window elapsed"); return BatchStatus::Drop; } @@ -188,7 +203,7 @@ where } // handle sequencer drift - if batch.timestamp > batch_origin.timestamp + self.config.chain.max_seq_drift { + if batch.timestamp > batch_origin.timestamp + self.max_seq_drift { if batch.transactions.is_empty() { if epoch.number == batch.epoch_num { if let Some(next_epoch) = next_epoch { diff --git a/src/derive/stages/channels.rs b/src/derive/stages/channels.rs index c43bb160..40b987c5 100644 --- a/src/derive/stages/channels.rs +++ b/src/derive/stages/channels.rs @@ -1,7 +1,5 @@ -use std::sync::Arc; - use super::batcher_transactions::{BatcherTransaction, Frame}; -use crate::{config::Config, derive::PurgeableIterator}; +use crate::derive::PurgeableIterator; pub struct Channels { batcher_tx_iter: I, @@ -38,13 +36,13 @@ where } impl 
Channels { - pub fn new(batcher_tx_iter: I, config: Arc) -> Self { + pub fn new(batcher_tx_iter: I, max_channel_size: u64, channel_timeout: u64) -> Self { Self { batcher_tx_iter, pending_channels: Vec::new(), frame_bank: Vec::new(), - max_channel_size: config.chain.max_channel_size, - channel_timeout: config.chain.channel_timeout, + max_channel_size, + channel_timeout, } } } @@ -240,12 +238,12 @@ impl From for Channel { #[cfg(test)] mod tests { use crate::{ - config::{ChainConfig, Config}, + config::ChainConfig, derive::stages::batcher_transactions::{ BatcherTransactionMessage, BatcherTransactions, Frame, }, }; - use std::sync::{mpsc, Arc}; + use std::sync::mpsc; use super::Channels; @@ -388,20 +386,18 @@ mod tests { Channels, mpsc::Sender, ) { - let config = Config { - l1_rpc_url: String::new(), - l2_rpc_url: String::new(), - l2_engine_url: String::new(), - jwt_secret: String::new(), - rpc_port: 9545, - chain: ChainConfig::optimism_goerli(), - checkpoint_sync_url: None, - devnet: false, - }; - + let ChainConfig { + max_channel_size, + channel_timeout, + .. + } = ChainConfig::optimism_goerli(); let (tx, rx) = mpsc::channel(); ( - Channels::new(BatcherTransactions::new(rx), Arc::new(config)), + Channels::new( + BatcherTransactions::new(rx), + max_channel_size, + channel_timeout, + ), tx, ) } diff --git a/src/derive/state.rs b/src/derive/state.rs index eb7041cc..cd7375d5 100644 --- a/src/derive/state.rs +++ b/src/derive/state.rs @@ -1,11 +1,10 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use ethers::types::H256; use crate::{ - common::{BlockInfo, Epoch}, - config::Config, l1::L1Info, + types::common::{BlockInfo, Epoch}, }; pub struct State { @@ -13,19 +12,29 @@ pub struct State { l1_hashes: BTreeMap, pub safe_head: BlockInfo, pub safe_epoch: Epoch, + pub unsafe_head: BlockInfo, + pub unsafe_epoch: Epoch, pub current_epoch_num: u64, - config: Arc, + seq_window_size: u64, } impl State { - pub fn new(finalized_head: BlockInfo, finalized_epoch: Epoch, config: Arc) -> Self { + pub fn new( + finalized_head: BlockInfo, + finalized_epoch: Epoch, + unsafe_head: BlockInfo, + unsafe_epoch: Epoch, + seq_window_size: u64, + ) -> Self { Self { l1_info: BTreeMap::new(), l1_hashes: BTreeMap::new(), safe_head: finalized_head, safe_epoch: finalized_epoch, + unsafe_head, + unsafe_epoch, current_epoch_num: 0, - config, + seq_window_size, } } @@ -39,6 +48,12 @@ impl State { .and_then(|hash| self.l1_info.get(hash)) } + pub fn l1_info_current(&self) -> Option<&L1Info> { + self.l1_hashes + .get(&self.current_epoch_num) + .and_then(|hash| self.l1_info.get(hash)) + } + pub fn epoch_by_hash(&self, hash: H256) -> Option { self.l1_info_by_hash(hash).map(|info| Epoch { number: info.block_info.number, @@ -72,18 +87,28 @@ impl State { self.safe_head = safe_head; self.safe_epoch = safe_epoch; + + self.unsafe_head = safe_head; + self.unsafe_epoch = safe_epoch; } pub fn update_safe_head(&mut self, safe_head: BlockInfo, safe_epoch: Epoch) { self.safe_head = safe_head; self.safe_epoch = safe_epoch; + + if self.safe_head.number > self.unsafe_head.number { + self.unsafe_head = safe_head; + self.unsafe_epoch = safe_epoch; + } + } + + pub fn update_unsafe_head(&mut self, unsafe_head: BlockInfo, unsafe_epoch: Epoch) { + self.unsafe_head = unsafe_head; + self.unsafe_epoch = unsafe_epoch; } fn prune(&mut self) { - let prune_until = self - .safe_epoch - .number - .saturating_sub(self.config.chain.seq_window_size); + let prune_until = self.safe_epoch.number.saturating_sub(self.seq_window_size); while 
let Some((block_num, block_hash)) = self.l1_hashes.first_key_value() { if *block_num >= prune_until { diff --git a/src/driver/engine_driver.rs b/src/driver/engine_driver.rs index 41ac6ced..7a8d11da 100644 --- a/src/driver/engine_driver.rs +++ b/src/driver/engine_driver.rs @@ -9,9 +9,9 @@ use ethers::{ use eyre::Result; use crate::{ - common::{BlockInfo, Epoch}, config::Config, engine::{Engine, EngineApi, ExecutionPayload, ForkchoiceState, PayloadAttributes, Status}, + types::common::{BlockInfo, Epoch, HeadInfo}, }; pub struct EngineDriver { @@ -21,16 +21,16 @@ pub struct EngineDriver { provider: Provider, /// Blocktime of the L2 chain blocktime: u64, - /// Most recent block found on the p2p network - pub unsafe_head: BlockInfo, - /// Most recent block that can be derived from L1 data - pub safe_head: BlockInfo, - /// Batch epoch of the safe head - pub safe_epoch: Epoch, - /// Most recent block that can be derived from finalized L1 data - pub finalized_head: BlockInfo, - /// Batch epoch of the finalized head - pub finalized_epoch: Epoch, + + /// The unsafe head info of the L2: blocks from P2P or sequencer. + pub unsafe_info: HeadInfo, + /// The safe head info of the L2: when referenced L1 block which are not finalized yet. + pub safe_info: HeadInfo, + /// The finalized head info of the L2: when referenced L1 block can't be reverted already. + pub finalized_info: HeadInfo, + + /// Engine sync head info. + pub sync_info: HeadInfo, } impl EngineDriver { @@ -41,7 +41,9 @@ impl EngineDriver { if should_skip(&block, &attributes)? { self.skip_attributes(attributes, block).await } else { - self.unsafe_head = self.safe_head; + self.unsafe_info = self.safe_info; + self.sync_info = self.safe_info; + self.process_attributes(attributes).await } } else { @@ -51,27 +53,29 @@ impl EngineDriver { pub async fn handle_unsafe_payload(&mut self, payload: &ExecutionPayload) -> Result<()> { self.push_payload(payload.clone()).await?; - self.unsafe_head = payload.into(); + + self.unsafe_info = payload.try_into()?; + self.sync_info = self.unsafe_info; + self.update_forkchoice().await?; tracing::info!( "head updated: {} {:?}", - self.unsafe_head.number, - self.unsafe_head.hash, + self.unsafe_info.head.number, + self.unsafe_info.head.hash, ); Ok(()) } - pub fn update_finalized(&mut self, head: BlockInfo, epoch: Epoch) { - self.finalized_head = head; - self.finalized_epoch = epoch; + pub fn update_finalized(&mut self, head: BlockInfo, epoch: Epoch, seq_number: u64) { + self.finalized_info = HeadInfo::new(head, epoch, seq_number) } pub fn reorg(&mut self) { - self.unsafe_head = self.finalized_head; - self.safe_head = self.finalized_head; - self.safe_epoch = self.finalized_epoch; + self.unsafe_info = self.finalized_info; + self.safe_info = self.finalized_info; + self.sync_info = self.finalized_info; } pub async fn engine_ready(&self) -> bool { @@ -84,6 +88,7 @@ impl EngineDriver { async fn process_attributes(&mut self, attributes: PayloadAttributes) -> Result<()> { let new_epoch = *attributes.epoch.as_ref().unwrap(); + let seq_number = attributes.seq_number.unwrap(); let payload = self.build_payload(attributes).await?; @@ -95,7 +100,7 @@ impl EngineDriver { }; self.push_payload(payload).await?; - self.update_safe_head(new_head, new_epoch, true)?; + self.update_safe_head(new_head, new_epoch, seq_number, true)?; self.update_forkchoice().await?; Ok(()) @@ -108,13 +113,14 @@ impl EngineDriver { ) -> Result<()> { let new_epoch = *attributes.epoch.as_ref().unwrap(); let new_head = BlockInfo::try_from(block)?; - 
self.update_safe_head(new_head, new_epoch, false)?; + + self.update_safe_head(new_head, new_epoch, attributes.seq_number.unwrap(), false)?; self.update_forkchoice().await?; Ok(()) } - async fn build_payload(&self, attributes: PayloadAttributes) -> Result { + pub async fn build_payload(&self, attributes: PayloadAttributes) -> Result { let forkchoice = self.create_forkchoice_state(); let update = self @@ -129,7 +135,6 @@ impl EngineDriver { let id = update .payload_id .ok_or(eyre::eyre!("engine did not return payload id"))?; - self.engine.get_payload(id).await } @@ -160,15 +165,17 @@ impl EngineDriver { &mut self, new_head: BlockInfo, new_epoch: Epoch, + new_seq_number: u64, reorg_unsafe: bool, ) -> Result<()> { - if self.safe_head != new_head { - self.safe_head = new_head; - self.safe_epoch = new_epoch; + if self.safe_info.head != new_head { + self.safe_info = HeadInfo::new(new_head, new_epoch, new_seq_number); + self.sync_info = self.safe_info; } - if reorg_unsafe || self.safe_head.number > self.unsafe_head.number { - self.unsafe_head = new_head; + if reorg_unsafe || self.safe_info.head.number > self.unsafe_info.head.number { + self.unsafe_info = HeadInfo::new(new_head, new_epoch, new_seq_number); + self.sync_info = self.unsafe_info; } Ok(()) @@ -176,16 +183,16 @@ impl EngineDriver { fn create_forkchoice_state(&self) -> ForkchoiceState { ForkchoiceState { - head_block_hash: self.unsafe_head.hash, - safe_block_hash: self.safe_head.hash, - finalized_block_hash: self.finalized_head.hash, + head_block_hash: self.unsafe_info.head.hash, + safe_block_hash: self.safe_info.head.hash, + finalized_block_hash: self.finalized_info.head.hash, } } async fn block_at(&self, timestamp: u64) -> Option> { - let time_diff = timestamp as i64 - self.finalized_head.timestamp as i64; + let time_diff = timestamp as i64 - self.finalized_info.head.timestamp as i64; let blocks = time_diff / self.blocktime as i64; - let block_num = self.finalized_head.number as i64 + blocks; + let block_num = self.finalized_info.head.number as i64 + blocks; self.provider .get_block_with_txs(block_num as u64) .await @@ -230,8 +237,9 @@ fn should_skip(block: &Block, attributes: &PayloadAttributes) -> Re impl EngineDriver { pub fn new( - finalized_head: BlockInfo, - finalized_epoch: Epoch, + finalized_info: HeadInfo, + safe_info: HeadInfo, + unsafe_info: HeadInfo, provider: Provider, config: &Arc, ) -> Result { @@ -240,12 +248,11 @@ impl EngineDriver { Ok(Self { engine, provider, - blocktime: config.chain.blocktime, - unsafe_head: finalized_head, - safe_head: finalized_head, - safe_epoch: finalized_epoch, - finalized_head, - finalized_epoch, + blocktime: config.chain.block_time, + finalized_info, + safe_info, + unsafe_info, + sync_info: unsafe_info, }) } } diff --git a/src/driver/info.rs b/src/driver/info.rs index d7b21e0a..6de3d078 100644 --- a/src/driver/info.rs +++ b/src/driver/info.rs @@ -1,9 +1,11 @@ -use crate::config::Config; -use crate::driver::types::HeadInfo; +use crate::config::ChainConfig; +use crate::types::common::HeadInfo; use ethers::middleware::Middleware; use ethers::providers::{JsonRpcClient, Provider, ProviderError}; use ethers::types::{Block, BlockId, BlockNumber, Transaction}; +use eyre::Result; + #[async_trait::async_trait] pub trait InnerProvider { async fn get_block_with_txs( @@ -35,28 +37,71 @@ impl<'a, P: JsonRpcClient> InnerProvider for HeadInfoFetcher<'a, P> { pub struct HeadInfoQuery {} impl HeadInfoQuery { - pub async fn get_head_info(p: &P, config: &Config) -> HeadInfo { - 
p.get_block_with_txs(BlockId::Number(BlockNumber::Finalized)) + async fn get_head_info(p: &P, block_number: BlockNumber) -> Option { + p.get_block_with_txs(BlockId::Number(block_number)) .await .ok() .flatten() .and_then(|block| HeadInfo::try_from(block).ok()) + } + + pub async fn get_finalized(p: &P, chain: &ChainConfig) -> HeadInfo { + let block_number = BlockNumber::Finalized; + Self::get_head_info(p, block_number) + .await + .unwrap_or_else(|| { + tracing::warn!( + "could not get {} head info. Falling back to the genesis head.", + block_number + ); + HeadInfo { + head: chain.l2_genesis(), + epoch: chain.l1_start_epoch(), + seq_number: 0, + } + }) + } + + pub async fn get_safe(p: &P, chain: &ChainConfig) -> HeadInfo { + let block_number = BlockNumber::Safe; + Self::get_head_info(p, block_number) + .await .unwrap_or_else(|| { - tracing::warn!("could not get head info. Falling back to the genesis head."); + tracing::warn!( + "could not get {} head info. Falling back to the genesis head.", + block_number + ); HeadInfo { - l2_block_info: config.chain.l2_genesis, - l1_epoch: config.chain.l1_start_epoch, - sequence_number: 0, + head: chain.l2_genesis(), + epoch: chain.l1_start_epoch(), + seq_number: 0, + } + }) + } + + pub async fn get_unsafe(p: &P, chain: &ChainConfig) -> HeadInfo { + let block_number = BlockNumber::Latest; + Self::get_head_info(p, block_number) + .await + .unwrap_or_else(|| { + tracing::warn!( + "could not get {} head info. Falling back to the genesis head.", + block_number + ); + HeadInfo { + head: chain.l2_genesis(), + epoch: chain.l1_start_epoch(), + seq_number: 0, } }) } } -#[cfg(all(test, feature = "test-utils"))] +#[allow(dead_code)] +#[cfg(any(test, feature = "test-utils"))] mod test_utils { use super::*; - use crate::common::{BlockInfo, Epoch}; - use crate::config::{ChainConfig, Config}; + use crate::types::common::{BlockInfo, Epoch}; use ethers::types::H256; use std::str::FromStr; @@ -70,7 +115,7 @@ mod test_utils { pub fn default_head_info() -> HeadInfo { HeadInfo { - l2_block_info: BlockInfo { + head: BlockInfo { hash: H256::from_str( "dbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3", ) @@ -82,7 +127,7 @@ mod test_utils { .unwrap(), timestamp: 1686068903, }, - l1_epoch: Epoch { + epoch: Epoch { number: 17422590, hash: H256::from_str( "438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108", @@ -90,7 +135,7 @@ mod test_utils { .unwrap(), timestamp: 1686068903, }, - sequence_number: 0, + seq_number: 0, } } @@ -123,19 +168,6 @@ mod test_utils { serde_json::from_str(raw_block).ok() } - pub fn optimism_config() -> Config { - Config { - l1_rpc_url: Default::default(), - l2_rpc_url: Default::default(), - l2_engine_url: Default::default(), - chain: ChainConfig::optimism(), - jwt_secret: Default::default(), - checkpoint_sync_url: Default::default(), - rpc_port: Default::default(), - devnet: false, - } - } - #[async_trait::async_trait] impl InnerProvider for MockProvider { async fn get_block_with_txs( @@ -154,24 +186,24 @@ mod tests { #[tokio::test] async fn test_get_head_info_fails() { let provider = test_utils::mock_provider(None); - let config = test_utils::optimism_config(); - let head_info = HeadInfoQuery::get_head_info(&provider, &config).await; + let chain = ChainConfig::optimism(); + let head_info = HeadInfoQuery::get_finalized(&provider, &chain).await; assert_eq!(test_utils::default_head_info(), head_info); } #[tokio::test] async fn test_get_head_info_empty_block() { let provider = test_utils::mock_provider(Some(Block::default())); - let 
config = test_utils::optimism_config(); - let head_info = HeadInfoQuery::get_head_info(&provider, &config).await; + let chain = ChainConfig::optimism(); + let head_info = HeadInfoQuery::get_finalized(&provider, &chain).await; assert_eq!(test_utils::default_head_info(), head_info); } #[tokio::test] async fn test_get_head_info_valid_block() { let provider = test_utils::mock_provider(test_utils::valid_block()); - let config = test_utils::optimism_config(); - let head_info = HeadInfoQuery::get_head_info(&provider, &config).await; + let chain = ChainConfig::optimism(); + let head_info = HeadInfoQuery::get_finalized(&provider, &chain).await; assert_eq!(test_utils::default_head_info(), head_info); } } diff --git a/src/driver/mod.rs b/src/driver/mod.rs index 4553d036..f9b8dc1f 100644 --- a/src/driver/mod.rs +++ b/src/driver/mod.rs @@ -1,37 +1,45 @@ use std::{ process, sync::{mpsc::Receiver, Arc, RwLock}, - time::Duration, + time::{Duration, SystemTime, UNIX_EPOCH}, }; +use arc_swap::ArcSwap; + use ethers::{ providers::{Http, Provider}, types::Address, }; use eyre::Result; + +use libp2p::gossipsub::IdentTopic; use reqwest::Url; +use thiserror::Error; use tokio::{ sync::watch::{self, Sender}, time::sleep, }; use crate::{ - common::{BlockInfo, Epoch}, - config::Config, + config::{Config, SequencerConfig}, derive::{state::State, Pipeline}, + driver::info::HeadInfoQuery, engine::{Engine, EngineApi, ExecutionPayload}, - l1::{BlockUpdate, ChainWatcher}, - network::{handlers::block_handler::BlockHandler, service::Service}, + l1::{BlockUpdate, ChainWatcher, L1Info}, + network::{ + handlers::{block_handler::BlockHandler, Handler}, + service::Service, + }, rpc, telemetry::metrics, + types::common::{BlockInfo, Epoch}, + types::rpc::SyncStatus, }; use self::engine_driver::EngineDriver; mod engine_driver; mod info; -mod types; -pub use types::*; /// Driver is responsible for advancing the execution node by feeding /// the derived chain into the engine API @@ -42,8 +50,12 @@ pub struct Driver { engine_driver: EngineDriver, /// List of unfinalized L2 blocks with their epochs, L1 inclusions, and sequence numbers unfinalized_blocks: Vec<(BlockInfo, Epoch, u64, u64)>, - /// Current finalized L1 block number - finalized_l1_block_number: u64, + /// Current finalized L1 block + finalized_l1_block: BlockInfo, + /// Current head L1 block + head_l1_block: BlockInfo, + /// Current safe L1 block + safe_l1_block: BlockInfo, /// List of unsafe blocks that have not been applied yet future_unsafe_blocks: Vec, /// State struct to keep track of global state @@ -58,8 +70,20 @@ pub struct Driver { unsafe_block_signer_sender: Sender
, /// Networking service network_service: Option, - /// Channel timeout length + /// Channel timeout length. channel_timeout: u64, + // Sender of the P2P broadcast channel. + p2p_sender: tokio::sync::mpsc::Sender, + // Receiver of the P2P broadcast channel. + p2p_receiver: Option>, + /// L2 Block time. + block_time: u64, + /// Max sequencer drift. + max_seq_drift: u64, + /// Sequener config. + sequencer_config: Option, + /// The Magi sync status. + sync_status: Arc>, } impl Driver { @@ -70,49 +94,68 @@ impl Driver { let http = Http::new_with_client(Url::parse(&config.l2_rpc_url)?, client); let provider = Provider::new(http); + let fetcher = info::HeadInfoFetcher::from(&provider); - let head = - info::HeadInfoQuery::get_head_info(&info::HeadInfoFetcher::from(&provider), &config) - .await; - - let finalized_head = head.l2_block_info; - let finalized_epoch = head.l1_epoch; - let finalized_seq = head.sequence_number; + let finalized_info = HeadInfoQuery::get_finalized(&fetcher, &config.chain).await; + let safe_info = HeadInfoQuery::get_safe(&fetcher, &config.chain).await; + let unsafe_info = HeadInfoQuery::get_unsafe(&fetcher, &config.chain).await; - tracing::info!("starting from head: {:?}", finalized_head.hash); + tracing::info!("starting finalized from head: {:?}", finalized_info.head); let l1_start_block = - get_l1_start_block(finalized_epoch.number, config.chain.channel_timeout); + get_l1_start_block(finalized_info.epoch.number, config.chain.channel_timeout); let config = Arc::new(config); - let chain_watcher = - ChainWatcher::new(l1_start_block, finalized_head.number, config.clone())?; + let chain_watcher: ChainWatcher = + ChainWatcher::new(l1_start_block, finalized_info.head.number, config.clone())?; let state = Arc::new(RwLock::new(State::new( - finalized_head, - finalized_epoch, - config.clone(), + finalized_info.head, + finalized_info.epoch, + unsafe_info.head, + unsafe_info.epoch, + config.chain.seq_window_size, ))); - let engine_driver = EngineDriver::new(finalized_head, finalized_epoch, provider, &config)?; - let pipeline = Pipeline::new(state.clone(), config.clone(), finalized_seq)?; + let sync_status = Arc::new(ArcSwap::from_pointee(Default::default())); - let _addr = rpc::run_server(config.clone()).await?; + let engine_driver = + EngineDriver::new(finalized_info, safe_info, unsafe_info, provider, &config)?; + let pipeline = Pipeline::new( + state.clone(), + &config.chain, + finalized_info.seq_number, + unsafe_info.seq_number, + )?; + + let _addr = rpc::run_server(config.clone(), sync_status.clone()).await?; let (unsafe_block_signer_sender, unsafe_block_signer_recv) = - watch::channel(config.chain.system_config.unsafe_block_signer); + watch::channel(config.chain.genesis.system_config.unsafe_block_signer); let (block_handler, unsafe_block_recv) = BlockHandler::new(config.chain.l2_chain_id, unsafe_block_signer_recv); - let service = Service::new("0.0.0.0:9876".parse()?, config.chain.l2_chain_id) - .add_handler(Box::new(block_handler)); + let service = Service::new( + config.p2p_listen, + config.chain.l2_chain_id, + config.p2p_bootnodes.clone(), + config.p2p_secret_key.clone(), + config.p2p_sequencer_secret_key.clone(), + IdentTopic::new(block_handler.topic().to_string()), + ) + .add_handler(Box::new(block_handler)); + + // channel for sending new blocks to peers + let (p2p_sender, p2p_receiver) = tokio::sync::mpsc::channel(1_000); Ok(Self { engine_driver, pipeline, unfinalized_blocks: Vec::new(), - finalized_l1_block_number: 0, + finalized_l1_block: Default::default(), + 
head_l1_block: Default::default(), + safe_l1_block: Default::default(), future_unsafe_blocks: Vec::new(), state, chain_watcher, @@ -121,10 +164,32 @@ impl Driver { unsafe_block_signer_sender, network_service: Some(service), channel_timeout: config.chain.channel_timeout, + p2p_receiver: Some(p2p_receiver), + p2p_sender, + block_time: config.chain.block_time, + max_seq_drift: config.chain.max_sequencer_drift, + sequencer_config: config.sequencer.clone(), + sync_status, }) } } +/// Custom error for sequencer. +#[derive(Debug, Error)] +enum SequencerErr { + #[error("out of sync with L1")] + OutOfSyncL1, + + #[error("past sequencer drift")] + PastSeqDrift, + + #[error("no next epoch available")] + NoNextEpoch, + + #[error("sequencer critical error: {0}")] + Critical(String), +} + impl Driver { /// Runs the Driver pub async fn start(&mut self) -> Result<()> { @@ -165,14 +230,139 @@ impl Driver { async fn advance(&mut self) -> Result<()> { self.advance_safe_head().await?; self.advance_unsafe_head().await?; + self.run_sequencer_step().await?; self.update_finalized(); + self.update_sync_status().await?; self.update_metrics(); + self.try_start_networking()?; Ok(()) } + /// Prepare data for generating next payload. + fn prepare_block_data( + &self, + unsafe_epoch: Epoch, + new_blocktime: u64, + ) -> Result<(Epoch, L1Info)> { + let state = self.state.read().expect("lock poisoned"); + + // Check we are in sync with L1. + let current_epoch = state + .epoch_by_number(unsafe_epoch.number) + .ok_or(SequencerErr::OutOfSyncL1)?; + + // Check past sequencer drift. + let is_seq_drift = new_blocktime > current_epoch.timestamp + self.max_seq_drift; + let next_epoch = state.epoch_by_number(current_epoch.number + 1); + + let origin_epoch = if let Some(next_epoch) = next_epoch { + if new_blocktime >= next_epoch.timestamp { + next_epoch + } else { + current_epoch + } + } else { + if is_seq_drift { + return Err(SequencerErr::PastSeqDrift.into()); + } + return Err(SequencerErr::NoNextEpoch.into()); + }; + + let l1_info = state + .l1_info_by_hash(origin_epoch.hash) + .ok_or(SequencerErr::Critical( + "can't find l1 info for origin epoch during block building".to_string(), + ))?; + + Ok((origin_epoch, l1_info.clone())) + } + + /// Runs the sequencer step. + /// Produces a block if the conditions are met. + /// If successful the block would be signed by sequencer and shared by P2P. + async fn run_sequencer_step(&mut self) -> Result<()> { + if let Some(seq_config) = self.sequencer_config.clone() { + // Get unsafe head to build a new block on top of it. + let head = self.engine_driver.unsafe_info.head; + let unsafe_epoch = self.engine_driver.unsafe_info.epoch; + + if seq_config.max_safe_lag() > 0 { + // Check max safe lag, and in case delay produce blocks. + if self.engine_driver.safe_info.head.number + seq_config.max_safe_lag() + <= head.number + { + tracing::debug!("max safe lag reached, waiting for safe block..."); + return Ok(()); + } + } + + // Next block timestamp. + let new_blocktime = head.timestamp + self.block_time; + + // Check if we can generate block and time passed. + let now = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); + if new_blocktime > now { + return Ok(()); + } + + // Prepare data (origin epoch and l1 information) for next block. + let (epoch, l1_info) = match self.prepare_block_data(unsafe_epoch, new_blocktime) { + Ok((epoch, l1_info)) => (epoch, l1_info), + Err(err) => match err.downcast()? 
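Condensed, the two gates at the top of `run_sequencer_step` (the safe-lag cap and the wall-clock check on the next block's timestamp) amount to the following. This is a sketch with hypothetical parameter names, not the patch's exact code:

use std::time::{SystemTime, UNIX_EPOCH};

/// Returns true when the sequencer should attempt to build a block now.
/// `max_safe_lag == 0` disables the lag check, mirroring the patch.
fn should_build(
    unsafe_head_number: u64,
    safe_head_number: u64,
    unsafe_head_timestamp: u64,
    block_time: u64,
    max_safe_lag: u64,
) -> bool {
    // Stop building if the unsafe chain has run too far ahead of the safe chain.
    if max_safe_lag > 0 && safe_head_number + max_safe_lag <= unsafe_head_number {
        return false;
    }
    // Only build once the next block's timestamp has actually been reached.
    let new_blocktime = unsafe_head_timestamp + block_time;
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    new_blocktime <= now
}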
{ + SequencerErr::NoNextEpoch => return Ok(()), + SequencerErr::OutOfSyncL1 => { + tracing::debug!("out of sync L1 {:?}", unsafe_epoch); + return Ok(()); + } + SequencerErr::Critical(msg) => eyre::bail!(msg), + SequencerErr::PastSeqDrift => eyre::bail!( + "failed to find next L1 origin for new block under past sequencer drifted" + ), + }, + }; + + let block_num = head.number + 1; + tracing::info!( + "attempt to build a payload {} {} {:?}", + block_num, + new_blocktime, + epoch, + ); + + let mut attributes = + self.pipeline + .derive_attributes_for_epoch(epoch, &l1_info, new_blocktime); + + tracing::trace!("produced payload attributes {} {:?}", block_num, attributes); + + attributes.no_tx_pool = new_blocktime > epoch.timestamp + self.max_seq_drift; + + if attributes.no_tx_pool { + tracing::warn!("tx pool disabled because of max sequencer drift"); + } + + let payload = self.engine_driver.build_payload(attributes).await?; + + tracing::trace!("produced payload {} {:?}", block_num, payload); + + self.engine_driver.handle_unsafe_payload(&payload).await?; + self.p2p_sender.send(payload).await?; + + self.state + .write() + .expect("lock posioned") + .update_unsafe_head( + self.engine_driver.unsafe_info.head, + self.engine_driver.unsafe_info.epoch, + ); + } + + Ok(()) + } + /// Attempts to advance the execution node forward one L1 block using derived /// L1 data. Errors if the most recent PayloadAttributes from the pipeline /// does not successfully advance the node @@ -195,16 +385,16 @@ impl Driver { tracing::info!( "safe head updated: {} {:?}", - self.engine_driver.safe_head.number, - self.engine_driver.safe_head.hash, + self.engine_driver.safe_info.head.number, + self.engine_driver.safe_info.head.hash, ); - let new_safe_head = self.engine_driver.safe_head; - let new_safe_epoch = self.engine_driver.safe_epoch; + let new_safe_head = self.engine_driver.safe_info.head; + let new_safe_epoch = self.engine_driver.safe_info.epoch; self.state .write() - .map_err(|_| eyre::eyre!("lock poisoned"))? 
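The origin selection in `prepare_block_data`, combined with the `no_tx_pool` flag set afterwards, reduces to a small decision rule. A sketch of that rule, with `(number, timestamp)` tuples standing in for the crate's `Epoch` type:

#[derive(Debug, PartialEq)]
enum Origin {
    /// Build on this epoch; the bool is the resulting `no_tx_pool` flag.
    Use { number: u64, no_tx_pool: bool },
    /// No next epoch known yet and we are still within the drift window.
    WaitForL1,
    /// No next epoch known and we are already past the sequencer drift.
    PastDrift,
}

fn pick_origin(
    current: (u64, u64),      // current L1 origin: (number, timestamp)
    next: Option<(u64, u64)>, // next L1 origin, if the watcher has seen it
    new_blocktime: u64,
    max_seq_drift: u64,
) -> Origin {
    let past_drift = new_blocktime > current.1 + max_seq_drift;
    let origin = match next {
        // Advance to the next origin once the new block's timestamp reaches it.
        Some(next) if new_blocktime >= next.1 => next,
        Some(_) => current,
        None if past_drift => return Origin::PastDrift,
        None => return Origin::WaitForL1,
    };
    Origin::Use {
        number: origin.0,
        // Past the drift window the block is still produced, but without pool txs.
        no_tx_pool: new_blocktime > origin.1 + max_seq_drift,
    }
}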
+ .expect("lock poisoned") .update_safe_head(new_safe_head, new_safe_epoch); let unfinalized_entry = ( @@ -227,7 +417,7 @@ impl Driver { self.future_unsafe_blocks.retain(|payload| { let unsafe_block_num = payload.block_number.as_u64(); - let synced_block_num = self.engine_driver.unsafe_head.number; + let synced_block_num = self.engine_driver.unsafe_info.head.number; unsafe_block_num > synced_block_num && unsafe_block_num - synced_block_num < 1024 }); @@ -235,22 +425,32 @@ impl Driver { let next_unsafe_payload = self .future_unsafe_blocks .iter() - .find(|p| p.parent_hash == self.engine_driver.unsafe_head.hash); + .find(|p| p.parent_hash == self.engine_driver.unsafe_info.head.hash); if let Some(payload) = next_unsafe_payload { - _ = self.engine_driver.handle_unsafe_payload(payload).await; + if let Err(err) = self.engine_driver.handle_unsafe_payload(payload).await { + tracing::debug!("Error processing unsafe payload: {err}"); + } else { + self.state + .write() + .expect("lock poisoned") + .update_unsafe_head( + self.engine_driver.unsafe_info.head, + self.engine_driver.unsafe_info.epoch, + ); + } } Ok(()) } fn update_state_head(&self) -> Result<()> { - let mut state = self - .state - .write() - .map_err(|_| eyre::eyre!("lock poisoned"))?; + let mut state = self.state.write().expect("lock poisoned"); - state.update_safe_head(self.engine_driver.safe_head, self.engine_driver.safe_epoch); + state.update_safe_head( + self.engine_driver.safe_info.head, + self.engine_driver.safe_info.epoch, + ); Ok(()) } @@ -272,36 +472,32 @@ impl Driver { self.state .write() - .map_err(|_| eyre::eyre!("lock poisoned"))? + .expect("lock poisoned") .update_l1_info(*l1_info); } BlockUpdate::Reorg => { tracing::warn!("reorg detected, purging pipeline"); - self.unfinalized_blocks.clear(); + let finalized_info = self.engine_driver.finalized_info; - let l1_start_block = get_l1_start_block( - self.engine_driver.finalized_epoch.number, - self.channel_timeout, - ); + let l1_start_block = + get_l1_start_block(finalized_info.epoch.number, self.channel_timeout); + self.unfinalized_blocks.clear(); self.chain_watcher - .restart(l1_start_block, self.engine_driver.finalized_head.number)?; + .restart(l1_start_block, finalized_info.head.number)?; self.state .write() - .map_err(|_| eyre::eyre!("lock poisoned"))? 
- .purge( - self.engine_driver.finalized_head, - self.engine_driver.finalized_epoch, - ); + .expect("lock poisoned") + .purge(finalized_info.head, finalized_info.epoch); self.pipeline.purge()?; self.engine_driver.reorg(); } - BlockUpdate::FinalityUpdate(num) => { - self.finalized_l1_block_number = num; - } + BlockUpdate::FinalityUpdate(block) => self.finalized_l1_block = block, + BlockUpdate::HeadUpdate(block) => self.head_l1_block = block, + BlockUpdate::SafetyUpdate(block) => self.safe_l1_block = block, } } @@ -313,37 +509,74 @@ impl Driver { .unfinalized_blocks .iter() .filter(|(_, _, inclusion, seq)| { - *inclusion <= self.finalized_l1_block_number && *seq == 0 + *inclusion <= self.finalized_l1_block.number && *seq == 0 }) .last(); - if let Some((head, epoch, _, _)) = new_finalized { - self.engine_driver.update_finalized(*head, *epoch); + if let Some((head, epoch, _, seq)) = new_finalized { + self.engine_driver.update_finalized(*head, *epoch, *seq); } self.unfinalized_blocks - .retain(|(_, _, inclusion, _)| *inclusion > self.finalized_l1_block_number); + .retain(|(_, _, inclusion, _)| *inclusion > self.finalized_l1_block.number); } fn try_start_networking(&mut self) -> Result<()> { - if self.synced() { - if let Some(service) = self.network_service.take() { - service.start()?; - } + if let Some(service) = self.network_service.take() { + let p2p_receiver = self + .p2p_receiver + .take() + .expect("The channel is not initialized"); + service.start(p2p_receiver)?; } Ok(()) } fn update_metrics(&self) { - metrics::FINALIZED_HEAD.set(self.engine_driver.finalized_head.number as i64); - metrics::SAFE_HEAD.set(self.engine_driver.safe_head.number as i64); + metrics::FINALIZED_HEAD.set(self.engine_driver.finalized_info.head.number as i64); + metrics::SAFE_HEAD.set(self.engine_driver.safe_info.head.number as i64); metrics::SYNCED.set(self.synced() as i64); } fn synced(&self) -> bool { !self.unfinalized_blocks.is_empty() } + + async fn update_sync_status(&self) -> Result<()> { + let state = self.state.read().expect("lock poisoned"); + + let current_l1_info = state.l1_info_current(); + + if let Some(current_l1_info) = current_l1_info { + let finalized_l1 = self.finalized_l1_block; + let head_l1 = self.head_l1_block; + let safe_l1 = self.safe_l1_block; + let queued_unsafe_block = self.get_queued_unsafe_block(); + + let new_status = SyncStatus::new( + current_l1_info.block_info.into(), + finalized_l1, + head_l1, + safe_l1, + self.engine_driver.unsafe_info, + self.engine_driver.safe_info, + self.engine_driver.finalized_info, + queued_unsafe_block, + self.engine_driver.sync_info, + )?; + + self.sync_status.store(Arc::new(new_status)); + } + + Ok(()) + } + + fn get_queued_unsafe_block(&self) -> Option<&ExecutionPayload> { + self.future_unsafe_blocks + .iter() + .min_by_key(|payload| payload.block_number.as_u64()) + } } /// Retrieves the L1 start block number. 
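The finalization bookkeeping above picks the newest unfinalized L2 block whose L1 inclusion block is already finalized and which starts an epoch (sequence number 0), then drops every entry whose inclusion height is at or below the finalized L1 head. A compact, self-contained sketch over plain tuples (the epoch field is omitted for brevity):

/// Entries mirror the driver's `(block_number, inclusion_l1_block, seq_number)` bookkeeping.
fn finalize(unfinalized: &mut Vec<(u64, u64, u64)>, finalized_l1_number: u64) -> Option<u64> {
    // Newest candidate whose L1 inclusion is finalized and which is the first
    // block of its epoch.
    let new_finalized = unfinalized
        .iter()
        .filter(|(_, inclusion, seq)| *inclusion <= finalized_l1_number && *seq == 0)
        .last()
        .map(|(block, _, _)| *block);

    // Keep only blocks whose inclusion is still beyond the finalized L1 head.
    unfinalized.retain(|(_, inclusion, _)| *inclusion > finalized_l1_number);

    new_finalized
}

fn main() {
    let mut pending = vec![(10, 100, 0), (11, 100, 1), (12, 101, 0)];
    assert_eq!(finalize(&mut pending, 100), Some(10));
    assert_eq!(pending, vec![(12, 101, 0)]);
}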
@@ -354,50 +587,66 @@ fn get_l1_start_block(epoch_number: u64, channel_timeout: u64) -> u64 { #[cfg(test)] mod tests { - use std::{path::PathBuf, str::FromStr}; - use ethers::{ - providers::{Http, Middleware}, + middleware::Middleware, + providers::Http, types::{BlockId, BlockNumber}, }; use eyre::Result; use tokio::sync::watch::channel; - use crate::config::{ChainConfig, CliConfig}; + use crate::config::ChainConfig; use super::*; #[tokio::test] async fn test_new_driver_from_finalized_head() -> Result<()> { - if std::env::var("L1_TEST_RPC_URL").is_ok() && std::env::var("L2_TEST_RPC_URL").is_ok() { - let config_path = PathBuf::from_str("config.toml")?; - let rpc = std::env::var("L1_TEST_RPC_URL")?; - let l2_rpc = std::env::var("L2_TEST_RPC_URL")?; - let cli_config = CliConfig { - l1_rpc_url: Some(rpc.to_owned()), - l2_rpc_url: Some(l2_rpc.to_owned()), - l2_engine_url: None, - jwt_secret: Some( - "d195a64e08587a3f1560686448867220c2727550ce3e0c95c7200d0ade0f9167".to_owned(), - ), - checkpoint_sync_url: Some(l2_rpc.to_owned()), - rpc_port: None, - devnet: false, - }; - let config = Config::new(&config_path, cli_config, ChainConfig::optimism_goerli()); - let (_shutdown_sender, shutdown_recv) = channel(false); + let rpc_env = std::env::var("L1_TEST_RPC_URL"); + let l2_rpc_env = std::env::var("L2_TEST_RPC_URL"); + let (rpc, l2_rpc) = match (rpc_env, l2_rpc_env) { + (Ok(rpc), Ok(l2_rpc)) => (rpc, l2_rpc), + (rpc_env, l2_rpc_env) => { + eprintln!("Test ignored: `test_new_driver_from_finalized_head`, rpc: {rpc_env:?}, l2_rpc: {l2_rpc_env:?}"); + return Ok(()); + } + }; - let block_id = BlockId::Number(BlockNumber::Finalized); - let provider = Provider::::try_from(config.l2_rpc_url.clone())?; - let finalized_block = provider.get_block(block_id).await?.unwrap(); + // threshold for cases, when new blocks generated + let max_difference = 500; - let driver = Driver::from_config(config, shutdown_recv).await?; + let config = Config { + chain: ChainConfig::optimism_goerli(), + l1_rpc_url: rpc, + l2_rpc_url: l2_rpc.clone(), + jwt_secret: "d195a64e08587a3f1560686448867220c2727550ce3e0c95c7200d0ade0f9167" + .to_owned(), + checkpoint_sync_url: Some(l2_rpc), + ..Config::default() + }; + + let (_shutdown_sender, shutdown_recv) = channel(false); + + let block_id = BlockId::Number(BlockNumber::Finalized); + let provider = Provider::::try_from(config.l2_rpc_url.clone())?; + let finalized_block = provider.get_block(block_id).await?.unwrap(); + + let driver = Driver::from_config(config, shutdown_recv).await?; + + let finalized_head_num = driver.engine_driver.finalized_info.head.number; + let block_num = finalized_block.number.unwrap().as_u64(); + + let difference = if finalized_head_num > block_num { + finalized_head_num - block_num + } else { + block_num - finalized_head_num + }; + + assert!( + difference <= max_difference, + "Difference between finalized_head_num ({finalized_head_num}) and block_num ({block_num}) \ + exceeds the threshold of {max_difference}", + ); - assert_eq!( - driver.engine_driver.finalized_head.number, - finalized_block.number.unwrap().as_u64() - ); - } Ok(()) } } diff --git a/src/engine/api.rs b/src/engine/api.rs index f3e9cc91..e269e6b5 100644 --- a/src/engine/api.rs +++ b/src/engine/api.rs @@ -130,21 +130,18 @@ impl EngineApi { .as_ref() .ok_or(eyre::eyre!("Driver missing http client"))?; - // Clone the secret so we can use it in the retry policy. 
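The rewritten driver test tolerates the L2 chain advancing between the two queries by comparing the two head numbers against a fixed threshold. The manual if/else it uses can also be expressed with `u64::abs_diff` (stable since Rust 1.60); a small equivalent sketch, with the test's bindings shown only in the comment:

/// True when the two block numbers are within the allowed drift of each other.
fn within_drift(a: u64, b: u64, max_difference: u64) -> bool {
    a.abs_diff(b) <= max_difference
}

// e.g. assert!(within_drift(finalized_head_num, block_num, 500));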
- let secret_clone = self.secret.clone(); + // Construct the JWT Authorization Token + let claims = self.secret.generate_claims(Some(SystemTime::now())); + let jwt = self + .secret + .encode(&claims) + .map_err(|_| eyre::eyre!("EngineApi failed to encode jwt with claims!"))?; let policy = RetryPolicy::fixed(Duration::ZERO).with_max_retries(5); // Send the request let res = policy .retry(|| async { - // Construct the JWT Authorization Token - let claims = secret_clone.generate_claims(Some(SystemTime::now())); - let jwt = secret_clone - .encode(&claims) - .map_err(|_| eyre::eyre!("EngineApi failed to encode jwt with claims!"))?; - - // Send the request client .post(&self.base_url) .header(header::AUTHORIZATION, format!("Bearer {}", jwt)) @@ -291,4 +288,49 @@ mod tests { // server.stop().unwrap(); // server.stopped().await; } + + #[tokio::test] + #[ignore] + async fn test_compare_payloads() { + let engine_api = EngineApi::new( + "http://127.0.0.1:5551", + "4700296286165daa27e9404669073a6b098e064d0f89d189033653157a277154", + ); + + let json_str = r#"[ + { + "finalizedBlockHash":"0xa3ab140f15ea7f7443a4702da64c10314eb04d488e72974e02e2d728096b4f76", + "headBlockHash":"0xa3ab140f15ea7f7443a4702da64c10314eb04d488e72974e02e2d728096b4f76", + "safeBlockHash":"0xa3ab140f15ea7f7443a4702da64c10314eb04d488e72974e02e2d728096b4f76" + }, + { + "gasLimit":"0x17d7840", + "noTxPool":true, + "prevRandao":"0xf003ba1768550abf36f554c23d6b3a120b6d3a4c454b981bf8cd5465fd2630c7", + "suggestedFeeRecipient":"0x4200000000000000000000000000000000000011", + "timestamp":"0x63d96d12", + "transactions":[ + "0x7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240" + ] + }]"#; + let params: Vec = serde_json::from_str(json_str).unwrap(); + println!("{:?}", params); + let res: ForkChoiceUpdate = engine_api + .post("engine_forkchoiceUpdatedV1", params) + .await + .unwrap(); + println!("{:?}", res); + + let payload_id = res.payload_id.unwrap(); + println!("payload id: {:?}", payload_id); + + let encoded = format!("{:x}", payload_id); + let padded = format!("0x{:0>16}", encoded); + let params = vec![Value::String(padded)]; + let res: ExecutionPayload = engine_api + .post("engine_getPayloadV1", params) + .await + .unwrap(); + println!("{:?}", res); + } } diff --git a/src/engine/payload.rs b/src/engine/payload.rs index a4107dba..2019f13f 100644 --- a/src/engine/payload.rs +++ b/src/engine/payload.rs @@ -2,10 +2,7 @@ use ethers::types::{Block, Bytes, Transaction, H160, H256, U64}; use eyre::Result; use serde::{Deserialize, Serialize}; -use crate::{ - common::{Epoch, RawTransaction}, - config::SystemAccounts, -}; +use crate::{config::SystemAccounts, types::attributes::RawTransaction, types::common::Epoch}; /// ## ExecutionPayload #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] @@ -160,26 +157,33 @@ mod tests { #[tokio::test] async fn test_from_block_hash_to_execution_paylaod() -> 
Result<()> { - if std::env::var("L1_TEST_RPC_URL").is_ok() && std::env::var("L2_TEST_RPC_URL").is_ok() { - let checkpoint_hash: H256 = - "0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()?; - - let l2_rpc = std::env::var("L2_TEST_RPC_URL")?; - let checkpoint_sync_url = Provider::::try_from(l2_rpc)?; - let checkpoint_block = checkpoint_sync_url - .get_block_with_txs(checkpoint_hash) - .await? - .unwrap(); - - let payload = ExecutionPayload::try_from(checkpoint_block)?; - - assert_eq!( - payload.block_hash, - "0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()? - ); - assert_eq!(payload.block_number, 8453214u64.into()); - assert_eq!(payload.base_fee_per_gas, 50u64.into()); - } + let l2_rpc = match std::env::var("L2_TEST_RPC_URL") { + Ok(l2_rpc) => l2_rpc, + l2_rpc_res => { + eprintln!( + "Test ignored: `test_from_block_hash_to_execution_paylaod`, l2_rpc: {l2_rpc_res:?}" + ); + return Ok(()); + } + }; + + let checkpoint_hash: H256 = + "0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()?; + + let checkpoint_sync_url = Provider::::try_from(l2_rpc)?; + let checkpoint_block = checkpoint_sync_url + .get_block_with_txs(checkpoint_hash) + .await? + .unwrap(); + + let payload = ExecutionPayload::try_from(checkpoint_block)?; + + assert_eq!( + payload.block_hash, + "0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()? + ); + assert_eq!(payload.block_number, 8453214u64.into()); + assert_eq!(payload.base_fee_per_gas, 50u64.into()); Ok(()) } diff --git a/src/l1/mod.rs b/src/l1/mod.rs index 2349a264..330fc412 100644 --- a/src/l1/mod.rs +++ b/src/l1/mod.rs @@ -15,9 +15,9 @@ use reqwest::Url; use tokio::{spawn, sync::mpsc, task::JoinHandle, time::sleep}; use crate::{ - common::BlockInfo, config::{Config, SystemConfig}, - derive::stages::attributes::UserDeposited, + types::attributes::UserDeposited, + types::common::BlockInfo, }; static CONFIG_UPDATE_TOPIC: Lazy = @@ -50,14 +50,18 @@ pub struct ChainWatcher { pub enum BlockUpdate { /// A new block extending the current chain NewBlock(Box), - /// Updates the most recent finalized block - FinalityUpdate(u64), + /// Updates the most recent finalized L1 block. + FinalityUpdate(BlockInfo), + /// L1 head update. + HeadUpdate(BlockInfo), + /// L1 safe block update. + SafetyUpdate(BlockInfo), /// Reorg detected Reorg, } /// Data tied to a specific L1 block -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct L1Info { /// L1 block data pub block_info: L1BlockInfo, @@ -72,7 +76,7 @@ pub struct L1Info { } /// L1 block info -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct L1BlockInfo { /// L1 block number pub number: u64, @@ -80,12 +84,25 @@ pub struct L1BlockInfo { pub hash: H256, /// L1 block timestamp pub timestamp: u64, + /// L1 block parent hash. + pub parent_hash: H256, /// L1 base fee per gas pub base_fee: U256, /// L1 mix hash (prevrandao) pub mix_hash: H256, } +impl From for BlockInfo { + fn from(l1_block: L1BlockInfo) -> Self { + Self { + hash: l1_block.hash, + number: l1_block.number, + parent_hash: l1_block.parent_hash, + timestamp: l1_block.timestamp, + } + } +} + /// Watcher actually ingests the L1 blocks. 
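With `BlockUpdate` gaining `HeadUpdate` and `SafetyUpdate` variants, the driver now tracks three L1 block cursors instead of a single finalized number. A self-contained sketch of that consumer side, using local mirrors of the crate types trimmed to what head tracking needs (`NewBlock` and `Reorg` are omitted because they touch the pipeline):

/// Local stand-in for `types::common::BlockInfo`.
#[derive(Clone, Copy, Default, Debug)]
struct BlockInfo {
    number: u64,
}

/// Local mirror of the informational `BlockUpdate` variants added in this patch.
enum BlockUpdate {
    FinalityUpdate(BlockInfo),
    HeadUpdate(BlockInfo),
    SafetyUpdate(BlockInfo),
}

#[derive(Default, Debug)]
struct L1Heads {
    finalized: BlockInfo,
    head: BlockInfo,
    safe: BlockInfo,
}

impl L1Heads {
    /// Mirrors how the driver applies the three new updates.
    fn apply(&mut self, update: BlockUpdate) {
        match update {
            BlockUpdate::FinalityUpdate(b) => self.finalized = b,
            BlockUpdate::HeadUpdate(b) => self.head = b,
            BlockUpdate::SafetyUpdate(b) => self.safe = b,
        }
    }
}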
Should be run in another /// thread and called periodically to keep updating channels struct InnerWatcher { @@ -101,6 +118,8 @@ struct InnerWatcher { head_block: u64, /// Most recent finalized block finalized_block: u64, + /// Most recent safe block + safe_block: u64, /// List of blocks that have not been finalized yet unfinalized_blocks: Vec, /// Mapping from block number to user deposits. Past block deposits @@ -199,8 +218,8 @@ impl InnerWatcher { ) -> Self { let provider = generate_http_provider(&config.l1_rpc_url); - let system_config = if l2_start_block == config.chain.l2_genesis.number { - config.chain.system_config + let system_config = if l2_start_block == config.chain.genesis.l2.number { + config.chain.genesis.system_config } else { let l2_provider = generate_http_provider(&config.l2_rpc_url); @@ -212,23 +231,23 @@ impl InnerWatcher { let input = &block .transactions - .first() + .get(0) .expect( "Could not find the L1 attributes deposited transaction in the parent L2 block", ) .input; - let batch_sender = Address::from_slice(&input[176..196]); - let l1_fee_overhead = U256::from(H256::from_slice(&input[196..228]).as_bytes()); - let l1_fee_scalar = U256::from(H256::from_slice(&input[228..260]).as_bytes()); + let batcher_addr = Address::from_slice(&input[176..196]); + let overhead = U256::from(H256::from_slice(&input[196..228]).as_bytes()); + let scalar = U256::from(H256::from_slice(&input[228..260]).as_bytes()); SystemConfig { - batch_sender, - l1_fee_overhead, - l1_fee_scalar, - gas_limit: block.gas_limit, + batcher_addr, + overhead, + scalar, + gas_limit: block.gas_limit.as_u64(), // TODO: fetch from contract - unsafe_block_signer: config.chain.system_config.unsafe_block_signer, + unsafe_block_signer: config.chain.genesis.system_config.unsafe_block_signer, } }; @@ -239,6 +258,7 @@ impl InnerWatcher { current_block: l1_start_block, head_block: 0, finalized_block: 0, + safe_block: 0, unfinalized_blocks: Vec::new(), deposits: HashMap::new(), system_config, @@ -250,7 +270,7 @@ impl InnerWatcher { if self.current_block > self.finalized_block { let finalized_block = self.get_finalized().await?; - self.finalized_block = finalized_block; + self.finalized_block = finalized_block.number; self.block_update_sender .send(BlockUpdate::FinalityUpdate(finalized_block)) .await?; @@ -261,7 +281,20 @@ impl InnerWatcher { if self.current_block > self.head_block { let head_block = self.get_head().await?; - self.head_block = head_block; + self.head_block = head_block.number; + + self.block_update_sender + .send(BlockUpdate::HeadUpdate(head_block)) + .await?; + } + + if self.current_block > self.safe_block { + let safe_block = self.get_safe().await?; + self.safe_block = safe_block.number; + + self.block_update_sender + .send(BlockUpdate::SafetyUpdate(safe_block)) + .await?; } if self.current_block <= self.head_block { @@ -274,7 +307,7 @@ impl InnerWatcher { let l1_info = L1Info::new( &block, user_deposits, - self.config.chain.batch_inbox, + self.config.chain.batch_inbox_address, finalized, self.system_config, )?; @@ -312,7 +345,7 @@ impl InnerWatcher { if last_update_block < self.current_block { let to_block = last_update_block + 1000; let filter = Filter::new() - .address(self.config.chain.system_config_contract) + .address(self.config.chain.l1_system_config_address) .topic0(*CONFIG_UPDATE_TOPIC) .from_block(last_update_block + 1) .to_block(to_block); @@ -327,14 +360,14 @@ impl InnerWatcher { let mut config = self.system_config; match update { SystemConfigUpdate::BatchSender(addr) => { - 
config.batch_sender = addr; + config.batcher_addr = addr; } SystemConfigUpdate::Fees(overhead, scalar) => { - config.l1_fee_overhead = overhead; - config.l1_fee_scalar = scalar; + config.overhead = overhead; + config.scalar = scalar; } SystemConfigUpdate::Gas(gas) => { - config.gas_limit = gas; + config.gas_limit = gas.as_u64(); } SystemConfigUpdate::UnsafeBlockSigner(addr) => { config.unsafe_block_signer = addr; @@ -371,31 +404,44 @@ impl InnerWatcher { } } - async fn get_finalized(&self) -> Result { + async fn get_finalized(&self) -> Result { let block_number = match self.config.devnet { false => BlockNumber::Finalized, true => BlockNumber::Latest, }; - Ok(self + let block = self .provider .get_block(block_number) .await? - .ok_or(eyre::eyre!("block not found"))? - .number - .ok_or(eyre::eyre!("block pending"))? - .as_u64()) + .ok_or(eyre::eyre!("block not found"))?; + + BlockInfo::try_from(block) + } + + async fn get_safe(&self) -> Result { + let block_number = match self.config.devnet { + false => BlockNumber::Safe, + true => BlockNumber::Latest, + }; + + let block = self + .provider + .get_block(block_number) + .await? + .ok_or(eyre::eyre!("block not found"))?; + + BlockInfo::try_from(block) } - async fn get_head(&self) -> Result { - Ok(self + async fn get_head(&self) -> Result { + let block = self .provider .get_block(BlockNumber::Latest) .await? - .ok_or(eyre::eyre!("block not found"))? - .number - .ok_or(eyre::eyre!("block pending"))? - .as_u64()) + .ok_or(eyre::eyre!("block not found"))?; + + BlockInfo::try_from(block) } async fn get_block(&self, block_num: u64) -> Result> { @@ -412,7 +458,7 @@ impl InnerWatcher { let end_block = self.head_block.min(block_num + 1000); let deposit_filter = Filter::new() - .address(self.config.chain.deposit_contract) + .address(self.config.chain.deposit_contract_address) .topic0(*TRANSACTION_DEPOSITED_TOPIC) .from_block(block_num) .to_block(end_block); @@ -460,6 +506,7 @@ impl L1Info { number: block_number, hash: block_hash, timestamp: block.timestamp.as_u64(), + parent_hash: block.parent_hash, base_fee: block .base_fee_per_gas .ok_or(eyre::eyre!("block is pre london"))?, @@ -467,7 +514,7 @@ impl L1Info { }; let batcher_transactions = - create_batcher_transactions(block, system_config.batch_sender, batch_inbox); + create_batcher_transactions(block, system_config.batcher_addr, batch_inbox); Ok(L1Info { block_info, diff --git a/src/lib.rs b/src/lib.rs index cb55ed71..925786bb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,7 @@ pub mod l1; /// Common types and functions -pub mod common; +pub mod types; /// Configuration management pub mod config; @@ -28,5 +28,5 @@ pub mod rpc; /// A module to handle running Magi in different sync modes pub mod runner; -/// A module to get current Magi version. +/// A module which derives a version of Magi node. 
pub mod version; diff --git a/src/network/handlers/block_handler.rs b/src/network/handlers/block_handler.rs index 3b4dc499..3709b13f 100644 --- a/src/network/handlers/block_handler.rs +++ b/src/network/handlers/block_handler.rs @@ -1,14 +1,16 @@ use std::sync::mpsc::{channel, Receiver, Sender}; use std::time::SystemTime; -use ethers::types::{Address, Bytes, Signature, H256}; +use ethers::types::{Address, Bytes, Signature, H160, H256}; use ethers::utils::keccak256; + use eyre::Result; use libp2p::gossipsub::{IdentTopic, Message, MessageAcceptance, TopicHash}; use ssz_rs::{prelude::*, List, Vector, U256}; use tokio::sync::watch; -use crate::{common::RawTransaction, engine::ExecutionPayload}; +use crate::network::signer::Signer; +use crate::{engine::ExecutionPayload, types::attributes::RawTransaction}; use super::Handler; @@ -91,6 +93,7 @@ fn decode_block_msg(data: Vec) -> Result<(ExecutionPayload, Signature, Paylo let signature = Signature::try_from(sig_data)?; + // TODO: Seems it can panic, what's basically can crash node. let payload: ExecutionPayloadSSZ = deserialize(block_data)?; let payload: ExecutionPayload = ExecutionPayload::from(payload); @@ -167,6 +170,61 @@ impl From for ExecutionPayload { } } +impl TryFrom for ExecutionPayloadSSZ { + type Error = eyre::Report; + + fn try_from(value: ExecutionPayload) -> Result { + Ok(Self { + parent_hash: convert_hash_to_bytes32(value.parent_hash)?, + fee_recipient: convert_hash_to_address(value.fee_recipient)?, + state_root: convert_hash_to_bytes32(value.state_root)?, + receipts_root: convert_hash_to_bytes32(value.receipts_root)?, + logs_bloom: convert_bytes_to_vector(value.logs_bloom)?, + prev_randao: convert_hash_to_bytes32(value.prev_randao)?, + block_number: value.block_number.as_u64(), + gas_limit: value.gas_limit.as_u64(), + gas_used: value.gas_used.as_u64(), + timestamp: value.timestamp.as_u64(), + extra_data: convert_bytes_to_list(value.extra_data)?, + base_fee_per_gas: value.base_fee_per_gas.as_u64().into(), + block_hash: convert_hash_to_bytes32(value.block_hash)?, + transactions: convert_tx_to_list(value.transactions)?, + }) + } +} + +fn convert_hash_to_bytes32(hash: H256) -> Result { + Bytes32::try_from(hash.as_fixed_bytes().to_vec()) + .map_err(|_| eyre::eyre!("can't convert H256 to Bytes32")) +} + +fn convert_hash_to_address(hash: H160) -> Result { + VecAddress::try_from(hash.as_fixed_bytes().to_vec()) + .map_err(|_| eyre::eyre!("can't convert H160 to Address")) +} + +fn convert_bytes_to_list(data: Bytes) -> Result> { + List::::try_from(data.to_vec()) + .map_err(|_| eyre::eyre!("can't convert bytes to List 32 size")) +} + +fn convert_bytes_to_vector(data: Bytes) -> Result> { + Vector::::try_from(data.to_vec()) + .map_err(|_| eyre::eyre!("can't convert bytes to Vector 256 size")) +} + +fn convert_tx_to_list(txs: Vec) -> Result> { + let mut list: List = Default::default(); + + for tx in txs { + let list_tx = Transaction::try_from(tx.0) + .map_err(|_| eyre::eyre!("can't convert RawTransaction to List"))?; + list.push(list_tx); + } + + Ok(list) +} + fn convert_hash(bytes: Bytes32) -> H256 { H256::from_slice(bytes.as_slice()) } @@ -193,3 +251,101 @@ fn convert_uint(value: U256) -> ethers::types::U64 { fn convert_tx_list(value: List) -> Vec { value.iter().map(|tx| RawTransaction(tx.to_vec())).collect() } + +pub fn encode_block_msg(payload: ExecutionPayload, signer: &Signer) -> Result> { + // Start preparing payload for distribution. 
+ let payload_ssz: ExecutionPayloadSSZ = payload.try_into()?; + let payload_bytes = serialize(&payload_ssz)?; + + // Signature. + let (_, sig) = signer.sign(&payload_bytes)?; + + // Create a payload for distribution. + let mut data: Vec = vec![]; + data.extend(sig); + data.extend(payload_bytes); + + // Zip. + let mut encoder = snap::raw::Encoder::new(); + + // The value can be passed by P2P. + Ok(encoder.compress_vec(&data)?) +} + +#[cfg(test)] +mod tests { + use crate::{ + engine::ExecutionPayload, network::signer::Signer, types::attributes::RawTransaction, + }; + use ethers::core::k256::ecdsa::SigningKey; + use ethers::types::{Bytes, H160, H256, U64}; + use ssz_rs::prelude::*; + + use eyre::Result; + + use rand::Rng; + + use super::{decode_block_msg, encode_block_msg, ExecutionPayloadSSZ}; + + #[test] + fn test_prepare_payload() -> Result<()> { + let mut rng = rand::thread_rng(); + let tx = RawTransaction(rng.gen::<[u8; 32]>().to_vec()); + + let mut logs_bloom = [0u8; 256]; + rng.fill(&mut logs_bloom); + + let payload = ExecutionPayload { + parent_hash: H256::random(), + fee_recipient: H160::random(), + state_root: H256::random(), + receipts_root: H256::random(), + logs_bloom: Bytes::from(logs_bloom), + prev_randao: H256::random(), + block_number: U64::from(rng.gen::()), + gas_limit: U64::from(rng.gen::()), + gas_used: U64::from(rng.gen::()), + timestamp: U64::from(rng.gen::()), + extra_data: Bytes::from(rng.gen::<[u8; 32]>()), + base_fee_per_gas: U64::from(rng.gen::()), + block_hash: H256::random(), + transactions: vec![tx], + }; + + // Start preparing payload for distribution. + let payload_ssz: ExecutionPayloadSSZ = payload.clone().try_into()?; + let payload_bytes = serialize(&payload_ssz)?; + + // Sign. + let private_key = SigningKey::random(&mut rand::thread_rng()); + let signer = Signer::new(1, private_key, None)?; + + // Signature. + let (_, sig) = signer.sign(&payload_bytes)?; + + // Create a payload for distribution. + let mut data: Vec = vec![]; + data.extend(sig.clone()); + data.extend(payload_bytes); + + // Zip. + let mut encoder = snap::raw::Encoder::new(); + + // The value can be passed by P2P. + let compressed_1 = encoder.compress_vec(&data)?; + let compressed_2 = encode_block_msg(payload.clone(), &signer)?; + + assert_eq!(compressed_1, compressed_2); + + for tx in [compressed_1, compressed_2] { + let (decoded_payload, decoded_signature, _) = decode_block_msg(tx)?; + assert_eq!(payload, decoded_payload, "decoded payload different"); + assert_eq!( + &sig, + &decoded_signature.to_vec(), + "decoded signature different" + ); + } + Ok(()) + } +} diff --git a/src/network/mod.rs b/src/network/mod.rs index 0d86d727..d3c88b12 100644 --- a/src/network/mod.rs +++ b/src/network/mod.rs @@ -1,2 +1,11 @@ +use std::net::SocketAddr; + pub mod handlers; pub mod service; +pub mod signer; + +pub const LISTENING_AS_STR: &str = "0.0.0.0:9876"; + +lazy_static::lazy_static! 
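`encode_block_msg` fixes the wire format used on the blocks topic: a 65-byte recoverable signature followed by the SSZ-encoded payload, with the whole message snappy-compressed. A sketch of the inverse framing step only (the SSZ deserialization and signature check that `decode_block_msg` performs afterwards are left out):

use eyre::Result;

/// Split a block-gossip message into its signature and SSZ payload parts.
fn unframe_block_msg(compressed: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
    let mut decoder = snap::raw::Decoder::new();
    let data = decoder.decompress_vec(compressed)?;

    // 64-byte ECDSA signature + 1 recovery byte, as produced by `Signer::sign`.
    eyre::ensure!(data.len() > 65, "message too short");
    let (sig, payload_ssz) = data.split_at(65);

    Ok((sig.to_vec(), payload_ssz.to_vec()))
}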
{ + pub static ref LISTENING: SocketAddr = LISTENING_AS_STR.parse().unwrap(); +} diff --git a/src/network/service/discovery.rs b/src/network/service/discovery.rs index 82771992..8da43c0e 100644 --- a/src/network/service/discovery.rs +++ b/src/network/service/discovery.rs @@ -6,6 +6,7 @@ use discv5::{ }; use ethers::utils::rlp; use eyre::Result; +use libp2p_identity::Keypair; use tokio::{ sync::mpsc::{self, Receiver}, time::sleep, @@ -14,9 +15,14 @@ use unsigned_varint::{decode, encode}; use super::types::{NetworkAddress, Peer}; -pub fn start(addr: NetworkAddress, chain_id: u64) -> Result> { - let bootnodes = bootnodes(); - let mut disc = create_disc(chain_id)?; +pub fn start( + addr: NetworkAddress, + keypair: Keypair, + chain_id: u64, + bootnodes_list: Option>>, +) -> Result> { + let bootnodes = bootnodes_list.unwrap_or_else(bootnodes); + let mut disc = create_disc(chain_id, keypair)?; let (sender, recv) = mpsc::channel::(256); @@ -61,17 +67,19 @@ fn is_valid_node(node: &Enr, chain_id: u64) -> bool { .unwrap_or_default() } -fn create_disc(chain_id: u64) -> Result { +fn create_disc(chain_id: u64, keypair: Keypair) -> Result { let opstack = OpStackEnrData { chain_id, version: 0, }; let opstack_data: Vec = opstack.into(); - let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4") + let key = + CombinedKey::secp256k1_from_bytes(&mut keypair.try_into_secp256k1()?.secret().to_bytes())?; + let enr: Enr = EnrBuilder::new("v4") .add_value_rlp("opstack", opstack_data.into()) .build(&key)?; + let config = Discv5Config::default(); Discv5::new(enr, key, config).map_err(|_| eyre::eyre!("could not create disc service")) @@ -109,6 +117,8 @@ impl From for Vec { } } +/// Ethereum Node Entry (ENR) for the mainet optimism. +/// Used in Magi as default values for p2p boot nodes fn bootnodes() -> Vec> { let bootnodes = [ "enr:-J64QBbwPjPLZ6IOOToOLsSjtFUjjzN66qmBZdUexpO32Klrc458Q24kbty2PdRaLacHM5z-cZQr8mjeQu3pik6jPSOGAYYFIqBfgmlkgnY0gmlwhDaRWFWHb3BzdGFja4SzlAUAiXNlY3AyNTZrMaECmeSnJh7zjKrDSPoNMGXoopeDF4hhpj5I0OsQUUt4u8uDdGNwgiQGg3VkcIIkBg", diff --git a/src/network/service/mod.rs b/src/network/service/mod.rs index 1c3f13da..671c755b 100644 --- a/src/network/service/mod.rs +++ b/src/network/service/mod.rs @@ -1,7 +1,8 @@ +use discv5::Enr; use std::{net::SocketAddr, time::Duration}; use eyre::Result; -use futures::{prelude::*, select}; +use futures::prelude::*; use libp2p::{ gossipsub::{self, IdentTopic, Message, MessageId}, mplex::MplexConfig, @@ -9,10 +10,16 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, tcp, Multiaddr, PeerId, Swarm, Transport, }; -use libp2p_identity::Keypair; +use libp2p_identity::{secp256k1, secp256k1::SecretKey, Keypair}; use openssl::sha::sha256; +use tokio::select; use super::{handlers::Handler, service::types::NetworkAddress}; +use crate::network::signer::Signer; + +use crate::{engine::ExecutionPayload, network::handlers::block_handler::encode_block_msg}; + +use ethers::core::k256::ecdsa::SigningKey; mod discovery; mod types; @@ -21,16 +28,39 @@ pub struct Service { handlers: Vec>, addr: SocketAddr, chain_id: u64, - keypair: Option, + keypair: Keypair, + bootnodes: Option>, + signer: Option, + block_topic: IdentTopic, } impl Service { - pub fn new(addr: SocketAddr, chain_id: u64) -> Self { + pub fn new( + addr: SocketAddr, + chain_id: u64, + bootnodes: Option>, + keypair: Option, + sequencer_secret_key: Option, + block_topic: IdentTopic, + ) -> Self { + let keypair = keypair + .map(|secp_secret_key| 
secp256k1::Keypair::from(secp_secret_key).into()) + .unwrap_or_else(Keypair::generate_secp256k1); + + let signer = sequencer_secret_key + .and_then(|pk| SigningKey::from_slice(&pk.to_bytes()).ok()) + .map(|signing_key| { + Signer::new(chain_id, signing_key, None).expect("Failed to create Signer") + }); + Self { handlers: Vec::new(), addr, chain_id, - keypair: None, + keypair, + bootnodes, + signer, + block_topic, } } @@ -40,16 +70,20 @@ impl Service { } pub fn set_keypair(mut self, keypair: Keypair) -> Self { - self.keypair = Some(keypair); + self.keypair = keypair; self } - pub fn start(mut self) -> Result<()> { + pub fn start( + mut self, + mut receiver_new_block: tokio::sync::mpsc::Receiver, + ) -> Result<()> { let addr = NetworkAddress::try_from(self.addr)?; - let keypair = self.keypair.unwrap_or_else(Keypair::generate_secp256k1); - let mut swarm = create_swarm(keypair, &self.handlers)?; - let mut peer_recv = discovery::start(addr, self.chain_id)?; + let mut swarm = create_swarm(self.keypair.clone(), &self.handlers)?; + + let mut peer_recv = + discovery::start(addr, self.keypair.clone(), self.chain_id, self.bootnodes)?; let multiaddr = Multiaddr::from(addr); swarm @@ -59,20 +93,46 @@ impl Service { let mut handlers = Vec::new(); handlers.append(&mut self.handlers); + // for p2p + let p2p_data = self.signer.take(); + + // Listening to peers tokio::spawn(async move { loop { select! { + // new peer peer = peer_recv.recv().fuse() => { if let Some(peer) = peer { let peer = Multiaddr::from(peer); _ = swarm.dial(peer); } }, + // incoming blocks from peers event = swarm.select_next_some() => { if let SwarmEvent::Behaviour(event) = event { event.handle(&mut swarm, &handlers); } }, + // publish a block for peers + Some(payload) = receiver_new_block.recv() => { + if let Some(signer) = p2p_data.as_ref() { + match encode_block_msg(payload, signer) + .map_err(|err| err.to_string()) + .and_then(|tx|{ + swarm.behaviour_mut().gossipsub + .publish(self.block_topic.clone(),tx) + .map_err(|err| err.to_string()) + }){ + Ok(_message_id) => {}, + Err(err) => tracing::debug!("P2P block broadcast error: {err:?}"), + }; + } else { + tracing::warn!("missed signer p2p private key; skip payload broadcast"); + } + }, + _ = tokio::signal::ctrl_c() => { + break; + } } } }); diff --git a/src/network/signer.rs b/src/network/signer.rs new file mode 100644 index 00000000..c4e677bf --- /dev/null +++ b/src/network/signer.rs @@ -0,0 +1,202 @@ +// +// Seems `ethers` crate cryptography was never audited. +// We must audit before prod.` +// +use ethers::{ + core::k256::ecdsa::SigningKey, + types::{Bytes, U256}, + utils::keccak256, +}; + +use eyre::Result; + +// Expect domain size. 
+const DOMAIN_BYTES_SIZE: usize = 32; + +#[derive(Clone, Debug)] +pub struct Signer { + chain_id: U256, + private_key: SigningKey, + domain: Bytes, +} + +impl Signer { + pub fn new( + chain_id: u64, + private_key: SigningKey, + domain_bytes: Option, + ) -> Result { + let domain = match domain_bytes { + Some(domain) => domain, + None => Bytes::from(vec![0; DOMAIN_BYTES_SIZE]), + }; + + eyre::ensure!( + domain.len() == DOMAIN_BYTES_SIZE, + "invalid domain size: expected {} bytes but got {} bytes", + DOMAIN_BYTES_SIZE, + domain.len() + ); + + Ok(Self { + chain_id: U256::from(chain_id), + private_key, + domain, + }) + } + + pub fn sign(&self, data: &Vec) -> Result<(Vec, Vec)> { + let hash = self.hash(data); + let (sig, recovery) = self.private_key.sign_prehash_recoverable(&hash)?; + + let mut raw_sig = sig.to_vec(); + raw_sig.push(recovery.to_byte()); + + Ok((hash.to_vec(), raw_sig)) + } + + fn hash(&self, data: &Vec) -> [u8; 32] { + let mut bytes = vec![]; + // Domain (32 bytes). + bytes.extend(self.domain.as_ref()); + // Chain ID (32 bytes). + let mut chain_id: [u8; 32] = [0; 32]; + self.chain_id.to_big_endian(&mut chain_id); + bytes.extend(chain_id); + // Data hash (32 bytes). + let data_hash = keccak256(data); + bytes.extend(data_hash); + + keccak256(bytes) + } +} + +#[cfg(test)] +mod test { + use rand; + use rand::Rng; + + use super::Signer; + use super::DOMAIN_BYTES_SIZE; + + use eyre::Result; + + use ethers::core::k256::ecdsa::signature::hazmat::PrehashVerifier; + use ethers::core::k256::ecdsa::{Signature, SigningKey}; + use ethers::types::{Bytes, U256}; + + #[test] + fn test_new_chain_id() -> Result<()> { + let private_key = SigningKey::random(&mut rand::thread_rng()); + + let signer = Signer::new(16, private_key, None)?; + assert_eq!(signer.chain_id, U256::from(16), "wrong chain id"); + + Ok(()) + } + + #[test] + fn test_new_key() -> Result<()> { + let private_key = SigningKey::random(&mut rand::thread_rng()); + + let signer = Signer::new(16, private_key.clone(), None)?; + assert!(signer.private_key == private_key, "pk is not equal"); + + Ok(()) + } + + #[test] + fn test_new_with_domain() -> Result<()> { + let mut rng = rand::thread_rng(); + + let private_key = SigningKey::random(&mut rng); + let domain = Bytes::from(rng.gen::<[u8; 32]>()); + + let signer = Signer::new(32, private_key, Some(domain.clone()))?; + assert_eq!(signer.domain, domain, "wrong domain"); + + Ok(()) + } + + #[test] + fn test_new_with_domain_wrong_size() { + let mut rng = rand::thread_rng(); + + let private_key = SigningKey::random(&mut rng); + let domain = Bytes::from(rng.gen::<[u8; 1]>()); + + let res = Signer::new(32, private_key, Some(domain.clone())); + assert!(res.is_err(), "should be domain error"); + + let err_msg = res.unwrap_err().to_string(); + assert!(err_msg.contains(&format!( + "invalid domain size: expected {} bytes but got {} bytes", + DOMAIN_BYTES_SIZE, + domain.len() + ))); + } + + #[test] + fn test_new_without_domain() -> Result<()> { + let private_key = SigningKey::random(&mut rand::thread_rng()); + + let signer = Signer::new(32, private_key, None)?; + assert_eq!( + signer.domain, + Bytes::from(vec![0; DOMAIN_BYTES_SIZE]), + "wrong domain" + ); + + Ok(()) + } + + #[test] + fn test_sign() -> Result<()> { + let mut rng = rand::thread_rng(); + let private_key = SigningKey::random(&mut rng); + + let signer = Signer::new(1, private_key.clone(), None)?; + let verifying_key = private_key.verifying_key(); + + let data = rng.gen::<[u8; 32]>(); + let (hash, raw_sig) = signer.sign(&data.to_vec())?; + + 
let signature = Signature::try_from(&raw_sig.as_slice()[..64])?; + + assert!( + verifying_key.verify_prehash(&hash, &signature).is_ok(), + "signature can't be verified" + ); + + Ok(()) + } + + #[test] + fn test_sign_static() -> Result<()> { + let expected_hash = + hex::decode("5f5692350e3f36252811cbec60967fc171ad5c53516cc0ee482fb5650ba3522f")?; + let expected_sig = hex::decode("09b2819c1d89a5ad6ba226018b720576927e5f03a23c57ba94644c4c981847b82b5b932d8a48668dba4726851cd8da85636919878d85e57da79ae5a3283a136801")?; + let pk_bytes = + hex::decode("0424ec6a64ab50deb8aea88c09dba51107dbc24fb32ccad3507bfa94a5bff43d")?; + + let private_key = SigningKey::from_slice(&pk_bytes)?; + + let signer = Signer::new(1, private_key.clone(), None)?; + let verifying_key = private_key.verifying_key(); + + let data = hex::decode("cbda5a037a1379ece732e4a791b500f8316fdb301f704344d6f9ef97f3efc90d")?; + let (hash, raw_sig) = signer.sign(&data.to_vec())?; + + assert_eq!(hash, expected_hash, "wrong hash"); + assert_eq!(raw_sig, expected_sig, "wrong signature"); + + let signature = Signature::try_from(&raw_sig.as_slice()[..64])?; + + assert!( + verifying_key.verify_prehash(&hash, &signature).is_ok(), + "signature can't be verified" + ); + + Ok(()) + } +} diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index 0bc8cc4d..27d7cfae 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -1,15 +1,17 @@ -use std::{fmt::Display, net::SocketAddr, sync::Arc}; +use std::{fmt::Display, net::SocketAddr, str::FromStr, sync::Arc}; use crate::{ - config::{Config, ExternalChainConfig}, - version::Version, + config::{ChainConfig, Config}, + types::rpc::SyncStatus, }; +use crate::{types::common::HeadInfo, version::Version}; +use arc_swap::ArcSwap; use eyre::Result; use ethers::{ providers::{Middleware, Provider}, - types::{Block, BlockId, H256}, + types::{Address, Block, BlockId, H256}, utils::keccak256, }; @@ -24,10 +26,13 @@ use serde::{Deserialize, Serialize}; #[rpc(server, namespace = "optimism")] pub trait Rpc { #[method(name = "outputAtBlock")] - async fn output_at_block(&self, block_number: u64) -> Result; + async fn output_at_block(&self, block_num_str: String) -> Result; + + #[method(name = "syncStatus")] + async fn sync_status(&self) -> Result; #[method(name = "rollupConfig")] - async fn rollup_config(&self) -> Result; + async fn rollup_config(&self) -> Result; #[method(name = "version")] async fn version(&self) -> Result; @@ -35,52 +40,66 @@ pub trait Rpc { #[derive(Debug)] pub struct RpcServerImpl { + sync_status: Arc>, version: Version, config: Arc, } #[async_trait] impl RpcServer for RpcServerImpl { - async fn output_at_block(&self, block_number: u64) -> Result { + async fn output_at_block(&self, block_num_str: String) -> Result { + let block_number = u64::from_str_radix(block_num_str.trim_start_matches("0x"), 16) + .map_err(|_| Error::Custom("unable to parse block number".to_string()))?; + let l2_provider = convert_err(Provider::try_from(self.config.l2_rpc_url.clone()))?; - let block = convert_err(l2_provider.get_block(block_number).await)? + let block = convert_err(l2_provider.get_block_with_txs(block_number).await)? 
.ok_or(Error::Custom("unable to get block".to_string()))?; + let state_root = block.state_root; + let block_hash = block .hash .ok_or(Error::Custom("block hash not found".to_string()))?; + + let block_ref = HeadInfo::try_from(block.clone()) + .map_err(|_| Error::Custom("unable to parse block into head info".to_string()))?; + + let message_parser = + Address::from_str("0x4200000000000000000000000000000000000016").unwrap(); let locations = vec![]; let block_id = Some(BlockId::from(block_hash)); - let state_proof = convert_err( l2_provider - .get_proof( - self.config.chain.l2_to_l1_message_passer, - locations, - block_id, - ) + .get_proof(message_parser, locations, block_id) .await, )?; let withdrawal_storage_root = state_proof.storage_hash; - let output_root = compute_l2_output_root(block, state_proof.storage_hash); + let output_root = compute_l2_output_root(block.into(), state_proof.storage_hash); let version: H256 = Default::default(); + let sync_status = (*self.sync_status.load()).clone(); + Ok(OutputRootResponse { output_root, version, + block_ref, state_root, withdrawal_storage_root, + sync_status: *sync_status, }) } - async fn rollup_config(&self) -> Result { - let config = (*self.config).clone(); + async fn sync_status(&self) -> Result { + let sync_status = (*self.sync_status.load()).clone(); + Ok(*sync_status) + } - Ok(ExternalChainConfig::from(config.chain)) + async fn rollup_config(&self) -> Result { + Ok(self.config.chain.clone()) } async fn version(&self) -> Result { @@ -107,15 +126,20 @@ fn compute_l2_output_root(block: Block, storage_root: H256) -> H256 { H256::from_slice(&digest) } -pub async fn run_server(config: Arc) -> Result { +pub async fn run_server( + config: Arc, + sync_status: Arc>, +) -> Result { let port = config.rpc_port; let server = ServerBuilder::default() - .build(format!("127.0.0.1:{}", port)) + .build(format!("0.0.0.0:{}", port)) .await?; let addr = server.local_addr()?; + let rpc_impl = RpcServerImpl { config, version: Version::build(), + sync_status, }; let handle = server.start(rpc_impl.into_rpc())?; @@ -130,8 +154,10 @@ pub async fn run_server(config: Arc) -> Result { #[derive(Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct OutputRootResponse { - pub output_root: H256, pub version: H256, - pub state_root: H256, + pub output_root: H256, + pub block_ref: HeadInfo, pub withdrawal_storage_root: H256, + pub state_root: H256, + pub sync_status: SyncStatus, } diff --git a/src/telemetry/metrics.rs b/src/telemetry/metrics.rs index a3e850a4..b49bee44 100644 --- a/src/telemetry/metrics.rs +++ b/src/telemetry/metrics.rs @@ -1,10 +1,14 @@ -use eyre::{Result, WrapErr}; +use std::net::SocketAddr; + +use eyre::Result; use lazy_static::lazy_static; use prometheus_exporter::{ prometheus::{register_int_gauge, IntGauge}, start, }; +pub const LISTENING_AS_STR: &str = "127.0.0.1:9200"; + lazy_static! { pub static ref FINALIZED_HEAD: IntGauge = register_int_gauge!("finalized_head", "finalized head number").unwrap(); @@ -13,7 +17,7 @@ lazy_static! 
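The output root returned by `optimism_outputAtBlock` is derived by `compute_l2_output_root`, whose body is unchanged by this patch and not fully shown here. As a reconstruction from the OP Stack's version-0 output-root format (keccak256 of version || state_root || withdrawal_storage_root || block_hash), not a copy of the function above:

use ethers::types::H256;
use ethers::utils::keccak256;

/// Version-0 L2 output root per the OP Stack spec.
fn l2_output_root(state_root: H256, withdrawal_storage_root: H256, block_hash: H256) -> H256 {
    let version = H256::zero();
    let mut payload = Vec::with_capacity(128);
    payload.extend_from_slice(version.as_bytes());
    payload.extend_from_slice(state_root.as_bytes());
    payload.extend_from_slice(withdrawal_storage_root.as_bytes());
    payload.extend_from_slice(block_hash.as_bytes());
    H256::from_slice(&keccak256(payload))
}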
{ pub static ref SYNCED: IntGauge = register_int_gauge!("synced", "synced flag").unwrap(); } -pub fn init() -> Result<()> { - start("0.0.0.0:9200".parse().wrap_err("Could not parse address")?)?; +pub fn init(binding: SocketAddr) -> Result<()> { + start(binding)?; Ok(()) } diff --git a/src/types/attributes.rs b/src/types/attributes.rs new file mode 100644 index 00000000..b348959c --- /dev/null +++ b/src/types/attributes.rs @@ -0,0 +1,500 @@ +use core::fmt::Debug; +use ethers::abi::parse_abi_str; +use ethers::abi::{decode, encode, ParamType, Token}; +use ethers::prelude::BaseContract; +use ethers::types::Bytes; +use ethers::types::{Address, Log, H256, U256}; +use ethers::utils::rlp::{DecoderError, Rlp}; +use ethers::utils::{keccak256, rlp::Decodable, rlp::Encodable, rlp::RlpStream}; + +use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; + +use eyre::Result; + +use crate::config::SystemAccounts; +use crate::l1::L1Info; + +use super::common::Epoch; + +/// A raw transaction +#[derive(Clone, PartialEq, Eq)] +pub struct RawTransaction(pub Vec); + +impl RawTransaction { + pub fn derive_unsafe_epoch(&self) -> Result<(Epoch, u64)> { + let rlp = Rlp::new(self.0.as_slice()); + let tx = rlp.as_val::()?; + let calldata = Bytes::try_from(tx.data)?; + let attr = AttributesDepositedCall::try_from(calldata)?; + let epoch = Epoch::from(&attr); + + Ok((epoch, attr.sequence_number)) + } +} + +impl Decodable for RawTransaction { + fn decode(rlp: &Rlp) -> Result { + let tx_bytes: Vec = rlp.as_val()?; + Ok(Self(tx_bytes)) + } +} + +impl Debug for RawTransaction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "0x{}", hex::encode(&self.0)) + } +} + +impl Serialize for RawTransaction { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&format!("0x{}", hex::encode(&self.0))) + } +} + +impl<'de> Deserialize<'de> for RawTransaction { + fn deserialize>(deserializer: D) -> Result { + let tx: String = serde::Deserialize::deserialize(deserializer)?; + let tx = tx.strip_prefix("0x").unwrap_or(&tx); + Ok(RawTransaction(hex::decode(tx).map_err(D::Error::custom)?)) + } +} + +/// Deposited L2 transaction. +#[derive(Debug, Clone, Default, Eq, PartialEq)] +pub struct DepositedTransaction { + source_hash: H256, + from: Address, + to: Option
, + mint: U256, + value: U256, + gas: u64, + is_system_tx: bool, + data: Vec, +} + +impl From for DepositedTransaction { + fn from(attributes_deposited: AttributesDeposited) -> Self { + let hash = attributes_deposited.hash.to_fixed_bytes(); + let seq = H256::from_low_u64_be(attributes_deposited.sequence_number).to_fixed_bytes(); + let h = keccak256([hash, seq].concat()); + + let domain = H256::from_low_u64_be(1).to_fixed_bytes(); + let source_hash = H256::from_slice(&keccak256([domain, h].concat())); + + let system_accounts = SystemAccounts::default(); + let from = system_accounts.attributes_depositor; + let to = Some(system_accounts.attributes_predeploy); + + let data = attributes_deposited.encode(); + + Self { + source_hash, + from, + to, + mint: U256::zero(), + value: U256::zero(), + gas: attributes_deposited.gas, + is_system_tx: attributes_deposited.is_system_tx, + data, + } + } +} + +impl From for DepositedTransaction { + fn from(user_deposited: UserDeposited) -> Self { + let hash = user_deposited.l1_block_hash.to_fixed_bytes(); + let log_index = user_deposited.log_index.into(); + let h = keccak256([hash, log_index].concat()); + + let domain = H256::from_low_u64_be(0).to_fixed_bytes(); + let source_hash = H256::from_slice(&keccak256([domain, h].concat())); + + let to = if user_deposited.is_creation { + None + } else { + Some(user_deposited.to) + }; + + Self { + source_hash, + from: user_deposited.from, + to, + mint: user_deposited.mint, + value: user_deposited.value, + gas: user_deposited.gas, + is_system_tx: false, + data: user_deposited.data, + } + } +} + +impl Encodable for DepositedTransaction { + fn rlp_append(&self, s: &mut RlpStream) { + s.append_raw(&[0x7E], 1); + s.begin_list(8); + s.append(&self.source_hash); + s.append(&self.from); + + if let Some(to) = self.to { + s.append(&to); + } else { + s.append(&""); + } + + s.append(&self.mint); + s.append(&self.value); + s.append(&self.gas); + s.append(&self.is_system_tx); + s.append(&self.data); + } +} + +impl Decodable for DepositedTransaction { + fn decode(rlp: &Rlp) -> Result { + if !rlp.is_data() { + return Err(DecoderError::RlpExpectedToBeData); + } + + if rlp.data().unwrap() != [0x7E] { + return Err(DecoderError::Custom( + "rlp data for deposited tx contains wrong prefix", + )); + } + + let list_rlp = Rlp::new(&rlp.as_raw()[1..]); + + let source_hash = list_rlp.val_at(0)?; + let from: Address = list_rlp.val_at(1)?; + let to = list_rlp.val_at(2).ok(); + let mint = list_rlp.val_at(3)?; + let value = list_rlp.val_at(4)?; + let gas = list_rlp.val_at(5)?; + let is_system_tx = list_rlp.val_at(6)?; + let data = list_rlp.val_at(7)?; + + Ok(DepositedTransaction { + source_hash, + from, + to, + mint, + value, + gas, + is_system_tx, + data, + }) + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct AttributesDeposited { + number: u64, + timestamp: u64, + base_fee: U256, + hash: H256, + sequence_number: u64, + batcher_hash: H256, + fee_overhead: U256, + fee_scalar: U256, + gas: u64, + is_system_tx: bool, +} + +impl AttributesDeposited { + pub fn from_block_info( + l1_info: &L1Info, + seq: u64, + batch_timestamp: u64, + regolith_time: u64, + ) -> Self { + let is_regolith = Self::get_regolith(batch_timestamp, regolith_time); + let is_system_tx = !is_regolith; + let gas = Self::get_gas(is_regolith); + + Self { + number: l1_info.block_info.number, + timestamp: l1_info.block_info.timestamp, + base_fee: l1_info.block_info.base_fee, + hash: l1_info.block_info.hash, + sequence_number: seq, + batcher_hash: 
l1_info.system_config.batcher_hash(), + fee_overhead: l1_info.system_config.overhead, + fee_scalar: l1_info.system_config.scalar, + gas, + is_system_tx, + } + } + + fn get_regolith(timestamp: u64, relogith_time: u64) -> bool { + timestamp >= relogith_time + } + + fn get_gas(is_regolith: bool) -> u64 { + if is_regolith { + 1_000_000 + } else { + 150_000_000 + } + } + + pub fn encode(&self) -> Vec { + let tokens = vec![ + Token::Uint(self.number.into()), + Token::Uint(self.timestamp.into()), + Token::Uint(self.base_fee), + Token::FixedBytes(self.hash.as_fixed_bytes().to_vec()), + Token::Uint(self.sequence_number.into()), + Token::FixedBytes(self.batcher_hash.as_fixed_bytes().to_vec()), + Token::Uint(self.fee_overhead), + Token::Uint(self.fee_scalar), + ]; + + let selector = hex::decode("015d8eb9").unwrap(); + let data = encode(&tokens); + + [selector, data].concat() + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct AttributesDepositedCall { + pub number: u64, + pub timestamp: u64, + pub basefee: U256, + pub hash: H256, + pub sequence_number: u64, + pub batcher_hash: H256, + pub fee_overhead: U256, + pub fee_scalar: U256, +} + +type SetL1BlockValueInput = (u64, u64, U256, H256, u64, H256, U256, U256); +const L1_BLOCK_CONTRACT_ABI: &str = r#"[ + function setL1BlockValues(uint64 _number,uint64 _timestamp, uint256 _basefee, bytes32 _hash,uint64 _sequenceNumber,bytes32 _batcherHash,uint256 _l1FeeOverhead,uint256 _l1FeeScalar) external +]"#; + +impl TryFrom for AttributesDepositedCall { + type Error = eyre::Report; + + fn try_from(value: Bytes) -> Result { + let abi = BaseContract::from(parse_abi_str(L1_BLOCK_CONTRACT_ABI)?); + + let ( + number, + timestamp, + basefee, + hash, + sequence_number, + batcher_hash, + fee_overhead, + fee_scalar, + ): SetL1BlockValueInput = abi.decode("setL1BlockValues", value)?; + + Ok(Self { + number, + timestamp, + basefee, + hash, + sequence_number, + batcher_hash, + fee_overhead, + fee_scalar, + }) + } +} + +impl From<&AttributesDepositedCall> for Epoch { + fn from(call: &AttributesDepositedCall) -> Self { + Self { + number: call.number, + timestamp: call.timestamp, + hash: call.hash, + } + } +} + +#[derive(Debug, Clone)] +pub struct UserDeposited { + pub from: Address, + pub to: Address, + pub mint: U256, + pub value: U256, + pub gas: u64, + pub is_creation: bool, + pub data: Vec, + pub l1_block_num: u64, + pub l1_block_hash: H256, + pub log_index: U256, +} + +impl TryFrom for UserDeposited { + type Error = eyre::Report; + + fn try_from(log: Log) -> Result { + let opaque_data = decode(&[ParamType::Bytes], &log.data)?[0] + .clone() + .into_bytes() + .unwrap(); + + let from = Address::try_from(log.topics[1])?; + let to = Address::try_from(log.topics[2])?; + let mint = U256::from_big_endian(&opaque_data[0..32]); + let value = U256::from_big_endian(&opaque_data[32..64]); + let gas = u64::from_be_bytes(opaque_data[64..72].try_into()?); + let is_creation = opaque_data[72] != 0; + let data = opaque_data[73..].to_vec(); + + let l1_block_num = log + .block_number + .ok_or(eyre::eyre!("block num not found"))? 
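The hard-coded `015d8eb9` selector in `AttributesDeposited::encode` and the ABI string in `L1_BLOCK_CONTRACT_ABI` describe the same function: the selector is simply the first four bytes of the keccak hash of its canonical signature. A quick check using `ethers::utils::id`:

use ethers::utils::id;

fn main() {
    let selector =
        id("setL1BlockValues(uint64,uint64,uint256,bytes32,uint64,bytes32,uint256,uint256)");
    // Matches the constant used when encoding the deposited attributes call.
    assert_eq!(hex::encode(selector), "015d8eb9");
    println!("setL1BlockValues selector: 0x{}", hex::encode(selector));
}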
+            .as_u64();
+
+        let l1_block_hash = log.block_hash.ok_or(eyre::eyre!("block hash not found"))?;
+        let log_index = log.log_index.unwrap();
+
+        Ok(Self {
+            from,
+            to,
+            mint,
+            value,
+            gas,
+            is_creation,
+            data,
+            l1_block_num,
+            l1_block_hash,
+            log_index,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    mod raw_transaction {
+        use std::str::FromStr;
+
+        use crate::types::attributes::RawTransaction;
+        use ethers::types::H256;
+
+        #[test]
+        fn derive_unsafe_epoch() -> eyre::Result<()> {
+            let tx = "7ef90159a0ec677ebcdc68441150dad4d485af314aaeb8a06d200e873d0ea1484ac47ce33194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b90104015d8eb9000000000000000000000000000000000000000000000000000000000000005700000000000000000000000000000000000000000000000000000000651f0495000000000000000000000000000000000000000000000000000000000000233579d0a6b649ad11c53645d2115d7912695401b73a35306642cbae97032b31b22b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c44cdddb6a900fa2b585dd299e03d12fa4293bc000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240";
+            let bytes = hex::decode(tx)?;
+            let raw_tx = RawTransaction(bytes);
+
+            let expected_hash = H256::from_str(
+                "0x79d0a6b649ad11c53645d2115d7912695401b73a35306642cbae97032b31b22b",
+            )?;
+
+            let (epoch, seq_num) = raw_tx.derive_unsafe_epoch()?;
+            assert!(epoch.number == 87);
+            assert!(epoch.timestamp == 1696531605);
+            assert!(epoch.hash == expected_hash);
+            assert!(seq_num == 0);
+
+            Ok(())
+        }
+    }
+
+    mod deposited_transaction {
+        use rand::Rng;
+
+        use crate::types::attributes::DepositedTransaction;
+        use ethers::{
+            types::{Address, H256, U256},
+            utils::rlp::{Encodable, Rlp},
+        };
+
+        #[test]
+        fn decodable_no_recipient() -> eyre::Result<()> {
+            let mut rng = rand::thread_rng();
+
+            let tx = DepositedTransaction {
+                source_hash: H256::random(),
+                from: Address::random(),
+                to: None,
+                mint: U256::from(rng.gen::<u64>()),
+                value: U256::from(rng.gen::<u64>()),
+                gas: rng.gen::<u64>(),
+                data: rng.gen::<[u8; 32]>().to_vec(),
+                is_system_tx: rng.gen_bool(1.0 / 2.0),
+            };
+
+            let rpl_bytes = tx.rlp_bytes();
+            let rlp = Rlp::new(&rpl_bytes);
+            let decoded_tx = rlp.as_val::<DepositedTransaction>()?;
+
+            assert!(tx.source_hash == decoded_tx.source_hash);
+            assert!(tx.from == decoded_tx.from);
+            assert!(tx.to == decoded_tx.to);
+            assert!(tx.mint == decoded_tx.mint);
+            assert!(tx.value == decoded_tx.value);
+            assert!(tx.gas == decoded_tx.gas);
+            assert!(tx.data == decoded_tx.data);
+            assert!(tx.is_system_tx == decoded_tx.is_system_tx);
+
+            Ok(())
+        }
+
+        #[test]
+        fn decodable() -> eyre::Result<()> {
+            let mut rng = rand::thread_rng();
+
+            let tx = DepositedTransaction {
+                source_hash: H256::random(),
+                from: Address::random(),
+                to: Some(Address::random()),
+                mint: U256::from(rng.gen::<u64>()),
+                value: U256::from(rng.gen::<u64>()),
+                gas: rng.gen::<u64>(),
+                data: rng.gen::<[u8; 32]>().to_vec(),
+                is_system_tx: rng.gen_bool(1.0 / 2.0),
+            };
+
+            let rpl_bytes = tx.rlp_bytes();
+            let rlp = Rlp::new(&rpl_bytes);
+            let decoded_tx = rlp.as_val::<DepositedTransaction>()?;
+
+            assert!(tx.source_hash == decoded_tx.source_hash);
+            assert!(tx.from == decoded_tx.from);
+            assert!(tx.to == decoded_tx.to);
+            assert!(tx.mint == decoded_tx.mint);
+            assert!(tx.value == decoded_tx.value);
+            assert!(tx.gas == decoded_tx.gas);
+            assert!(tx.data == decoded_tx.data);
+            assert!(tx.is_system_tx == decoded_tx.is_system_tx);
+
+            Ok(())
+        }
+    }
+
+    mod attributed_deposited_call {
+        use ethers::types::{Bytes, H256};
+        use std::str::FromStr;
+
+        use crate::types::attributes::AttributesDepositedCall;
+
+        #[test]
+        fn decode_from_bytes() -> eyre::Result<()> {
+            // Arrange
+            let calldata = "0x015d8eb900000000000000000000000000000000000000000000000000000000008768240000000000000000000000000000000000000000000000000000000064443450000000000000000000000000000000000000000000000000000000000000000e0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d00000000000000000000000000000000000000000000000000000000000000050000000000000000000000007431310e026b69bfc676c0013e12a1a11411eec9000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240";
+
+            let expected_hash =
+                H256::from_str("0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d")?;
+            let expected_block_number = 8874020;
+            let expected_timestamp = 1682191440;
+
+            // Act
+            let call = AttributesDepositedCall::try_from(Bytes::from_str(calldata)?);
+
+            // Assert
+            assert!(call.is_ok());
+            let call = call.unwrap();
+
+            assert_eq!(call.hash, expected_hash);
+            assert_eq!(call.number, expected_block_number);
+            assert_eq!(call.timestamp, expected_timestamp);
+
+            Ok(())
+        }
+    }
+}
diff --git a/src/types/common.rs b/src/types/common.rs
new file mode 100644
index 00000000..87186637
--- /dev/null
+++ b/src/types/common.rs
@@ -0,0 +1,341 @@
+use ethers::types::{Block, Transaction, H256};
+use figment::value::{Dict, Tag, Value};
+use serde::{Deserialize, Serialize};
+
+use crate::engine::ExecutionPayload;
+
+use eyre::Result;
+
+use super::attributes::AttributesDepositedCall;
+
+/// Selected block header info
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BlockInfo {
+    pub hash: H256,
+    pub number: u64,
+    pub parent_hash: H256,
+    pub timestamp: u64,
+}
+
+impl TryFrom<Block<Transaction>> for BlockInfo {
+    type Error = eyre::Report;
+
+    fn try_from(block: Block<Transaction>) -> Result<Self> {
+        let number = block
+            .number
+            .ok_or(eyre::eyre!("block not included"))?
+            .as_u64();
+
+        let hash = block.hash.ok_or(eyre::eyre!("block not included"))?;
+
+        Ok(BlockInfo {
+            number,
+            hash,
+            parent_hash: block.parent_hash,
+            timestamp: block.timestamp.as_u64(),
+        })
+    }
+}
+
+impl From<&ExecutionPayload> for BlockInfo {
+    fn from(value: &ExecutionPayload) -> Self {
+        Self {
+            number: value.block_number.as_u64(),
+            hash: value.block_hash,
+            parent_hash: value.parent_hash,
+            timestamp: value.timestamp.as_u64(),
+        }
+    }
+}
+
+impl From<BlockInfo> for Value {
+    fn from(value: BlockInfo) -> Value {
+        let mut dict = Dict::new();
+        dict.insert("hash".to_string(), Value::from(value.hash.as_bytes()));
+        dict.insert("number".to_string(), Value::from(value.number));
+        dict.insert("timestamp".to_string(), Value::from(value.timestamp));
+        dict.insert(
+            "parent_hash".to_string(),
+            Value::from(value.parent_hash.as_bytes()),
+        );
+        Value::Dict(Tag::Default, dict)
+    }
+}
+
+/// L2 block info, with the L1 epoch it references and its sequence number.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct HeadInfo {
+    /// The L2 head.
+    #[serde(flatten)]
+    pub head: BlockInfo,
+    /// Referenced L1 epoch.
+    #[serde(rename = "l1origin")]
+    pub epoch: Epoch,
+    /// Sequence number in the epoch.
+    #[serde(rename = "sequenceNumber")]
+    pub seq_number: u64,
+}
+
+impl HeadInfo {
+    pub fn new(head: BlockInfo, epoch: Epoch, seq_number: u64) -> Self {
+        Self {
+            head,
+            epoch,
+            seq_number,
+        }
+    }
+}
+
+impl TryFrom<&ExecutionPayload> for HeadInfo {
+    type Error = eyre::Report;
+
+    fn try_from(payload: &ExecutionPayload) -> Result<Self> {
+        let (epoch, seq_number) = payload
+            .transactions
+            .get(0)
+            .ok_or(eyre::eyre!("no deposit transaction"))?
+            .derive_unsafe_epoch()?;
+
+        Ok(Self {
+            head: BlockInfo {
+                hash: payload.block_hash,
+                number: payload.block_number.as_u64(),
+                parent_hash: payload.parent_hash,
+                timestamp: payload.timestamp.as_u64(),
+            },
+            epoch,
+            seq_number,
+        })
+    }
+}
+
+impl TryFrom<Block<Transaction>> for HeadInfo {
+    type Error = eyre::Report;
+
+    fn try_from(block: Block<Transaction>) -> std::result::Result<Self, Self::Error> {
+        let tx_calldata = block
+            .transactions
+            .get(0)
+            .ok_or(eyre::eyre!(
+                "Could not find the L1 attributes deposited transaction"
+            ))?
+            .input
+            .clone();
+
+        let call = AttributesDepositedCall::try_from(tx_calldata)?;
+
+        Ok(Self {
+            head: block.try_into()?,
+            epoch: Epoch::from(&call),
+            seq_number: call.sequence_number,
+        })
+    }
+}
+
+/// L1 epoch block
+#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
+pub struct Epoch {
+    pub number: u64,
+    pub hash: H256,
+    pub timestamp: u64,
+}
+
+impl From<Epoch> for Value {
+    fn from(value: Epoch) -> Self {
+        let mut dict: std::collections::BTreeMap<String, Value> = Dict::new();
+        dict.insert("hash".to_string(), Value::from(value.hash.as_bytes()));
+        dict.insert("number".to_string(), Value::from(value.number));
+        dict.insert("timestamp".to_string(), Value::from(value.timestamp));
+        Value::Dict(Tag::Default, dict)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::types::common::HeadInfo;
+    use std::str::FromStr;
+
+    use ethers::{
+        providers::{Middleware, Provider},
+        types::{Block, Transaction, H256},
+    };
+    use eyre::Result;
+
+    #[test]
+    fn should_fail_conversion_from_a_block_to_head_info_if_missing_l1_deposited_tx() -> Result<()> {
+        // Arrange
+        let raw_block = r#"{
+            "hash": "0x2e4f4aff36bb7951be9742ad349fb1db84643c6bbac5014f3d196fd88fe333eb",
+            "parentHash": "0xeccf4c06ad0d27be1cadee5720a509d31a9de0462b52f2cf6045d9a73c9aa504",
+            "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+            "miner": "0x4200000000000000000000000000000000000011",
+            "stateRoot": "0x5905b2423f299a29db41e377d7ceadf4baa49eed04e1b72957e8c0985e04e730",
+            "transactionsRoot": "0x030e481411042a769edde83d790d583ed69f9d3098d4a78d00e008f749fcfd97",
+            "receiptsRoot": "0x29079b696c12a19999f3bb303fddb6fc12fb701f427678cca24954b91080ada3",
+            "number": "0x7fe52f",
+            "gasUsed": "0xb711",
+            "gasLimit": "0x17d7840",
+            "extraData": "0x",
+            "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+            "timestamp": "0x644434c2",
+            "difficulty": "0x0",
+            "totalDifficulty": "0x0",
+            "sealFields": [],
+            "uncles": [],
+            "transactions": [],
+            "size": "0x365",
+            "mixHash": "0x7aeec5550a9b0616701e49ab835af5f10eadba2a0582016f0e256c9cace0c046",
+            "nonce": "0x0000000000000000",
+            "baseFeePerGas": "0x32"
"0x32" + } + "#; + + let block: Block = serde_json::from_str(raw_block)?; + + // Act + let head = HeadInfo::try_from(block); + + // Assert + assert!(head.is_err()); + let err = head.unwrap_err(); + + assert!(err + .to_string() + .contains("Could not find the L1 attributes deposited transaction")); + + Ok(()) + } + + #[test] + fn should_convert_from_a_block_to_head_info() -> Result<()> { + // Arrange + let raw_block = r#"{ + "hash": "0x2e4f4aff36bb7951be9742ad349fb1db84643c6bbac5014f3d196fd88fe333eb", + "parentHash": "0xeccf4c06ad0d27be1cadee5720a509d31a9de0462b52f2cf6045d9a73c9aa504", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "miner": "0x4200000000000000000000000000000000000011", + "stateRoot": "0x5905b2423f299a29db41e377d7ceadf4baa49eed04e1b72957e8c0985e04e730", + "transactionsRoot": "0x030e481411042a769edde83d790d583ed69f9d3098d4a78d00e008f749fcfd97", + "receiptsRoot": "0x29079b696c12a19999f3bb303fddb6fc12fb701f427678cca24954b91080ada3", + "number": "0x7fe52f", + "gasUsed": "0xb711", + "gasLimit": "0x17d7840", + "extraData": "0x", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x644434c2", + "difficulty": "0x0", + "totalDifficulty": "0x0", + "sealFields": [], + "uncles": [], + "transactions": [ + { + "hash": "0x661df2908a63c9701ef4f9bc1d62432f08cbdc8c6fe6012af49405c00de5f69d", + "nonce": "0x41ed06", + "blockHash": "0x2e4f4aff36bb7951be9742ad349fb1db84643c6bbac5014f3d196fd88fe333eb", + "blockNumber": "0x7fe52f", + "transactionIndex": "0x0", + "from": "0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001", + "to": "0x4200000000000000000000000000000000000015", + "value": "0x0", + "gasPrice": "0x0", + "gas": "0xf4240", + "input": "0x015d8eb900000000000000000000000000000000000000000000000000000000008768240000000000000000000000000000000000000000000000000000000064443450000000000000000000000000000000000000000000000000000000000000000e0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d00000000000000000000000000000000000000000000000000000000000000050000000000000000000000007431310e026b69bfc676c0013e12a1a11411eec9000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "type": "0x7e", + "mint": "0x0", + "sourceHash": "0x34ad504eea583add76d3b9d249965356ef6ca344d6766644c929357331bb0dc9" + } + ], + "size": "0x365", + "mixHash": "0x7aeec5550a9b0616701e49ab835af5f10eadba2a0582016f0e256c9cace0c046", + "nonce": "0x0000000000000000", + "baseFeePerGas": "0x32" + } + "#; + + let block: Block = serde_json::from_str(raw_block)?; + + let expected_l2_block_hash = + H256::from_str("0x2e4f4aff36bb7951be9742ad349fb1db84643c6bbac5014f3d196fd88fe333eb")?; + let expected_l2_block_number = 8381743; + let expected_l2_block_timestamp = 1682191554; + + let expected_l1_epoch_hash = + H256::from_str("0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d")?; + let expected_l1_epoch_block_number = 8874020; + let expected_l1_epoch_timestamp = 1682191440; + + // 
+        let head = HeadInfo::try_from(block);
+
+        // Assert
+        assert!(head.is_ok());
+        let HeadInfo {
+            head,
+            epoch,
+            seq_number,
+        } = head.unwrap();
+
+        assert_eq!(head.hash, expected_l2_block_hash);
+        assert_eq!(head.number, expected_l2_block_number);
+        assert_eq!(head.timestamp, expected_l2_block_timestamp);
+
+        assert_eq!(epoch.hash, expected_l1_epoch_hash);
+        assert_eq!(epoch.number, expected_l1_epoch_block_number);
+        assert_eq!(epoch.timestamp, expected_l1_epoch_timestamp);
+
+        assert_eq!(seq_number, 5);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_head_info_from_l2_block_hash() -> Result<()> {
+        let l2_rpc = match std::env::var("L2_TEST_RPC_URL") {
+            Ok(l2_rpc) => l2_rpc,
+            l2_rpc_res => {
+                eprintln!(
+                    "Test ignored: `test_head_info_from_l2_block_hash`, l2_rpc: {l2_rpc_res:?}"
+                );
+                return Ok(());
+            }
+        };
+
+        let l2_block_hash =
+            H256::from_str("0x75d4a658d7b6430c874c5518752a8d90fb1503eccd6ae4cfc97fd4aedeebb939")?;
+
+        let expected_l2_block_number = 8428108;
+        let expected_l2_block_timestamp = 1682284284;
+
+        let expected_l1_epoch_hash =
+            H256::from_str("0x76ab90dc2afea158bbe14a99f22d5f867b51719378aa37d1a3aa3833ace67cad")?;
+        let expected_l1_epoch_block_number = 8879997;
+        let expected_l1_epoch_timestamp = 1682284164;
+
+        let provider = Provider::try_from(l2_rpc)?;
+
+        let l2_block = provider.get_block_with_txs(l2_block_hash).await?.unwrap();
+        let head = HeadInfo::try_from(l2_block)?;
+
+        let HeadInfo {
+            head,
+            epoch,
+            seq_number,
+        } = head;
+
+        assert_eq!(head.number, expected_l2_block_number);
+        assert_eq!(head.timestamp, expected_l2_block_timestamp);
+
+        assert_eq!(epoch.hash, expected_l1_epoch_hash);
+        assert_eq!(epoch.number, expected_l1_epoch_block_number);
+        assert_eq!(epoch.timestamp, expected_l1_epoch_timestamp);
+
+        assert_eq!(seq_number, 4);
+
+        Ok(())
+    }
+}
diff --git a/src/types/mod.rs b/src/types/mod.rs
new file mode 100644
index 00000000..98d80395
--- /dev/null
+++ b/src/types/mod.rs
@@ -0,0 +1,3 @@
+pub mod attributes;
+pub mod common;
+pub mod rpc;
diff --git a/src/types/rpc.rs b/src/types/rpc.rs
new file mode 100644
index 00000000..99af1b86
--- /dev/null
+++ b/src/types/rpc.rs
@@ -0,0 +1,54 @@
+use crate::engine::ExecutionPayload;
+use serde::{Deserialize, Serialize};
+
+use super::common::{BlockInfo, HeadInfo};
+
+use eyre::Result;
+
+/// The node sync status.
+#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)]
+pub struct SyncStatus {
+    pub current_l1: BlockInfo,
+    pub current_l1_finalized: BlockInfo,
+    pub head_l1: BlockInfo,
+    pub safe_l1: BlockInfo,
+    pub finalized_l1: BlockInfo,
+    pub unsafe_l2: HeadInfo,
+    pub safe_l2: HeadInfo,
+    pub finalized_l2: HeadInfo,
+    pub queued_unsafe_l2: HeadInfo,
+    pub engine_sync_target: HeadInfo,
+}
+
+impl SyncStatus {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        current_l1: BlockInfo,
+        finalized_l1: BlockInfo,
+        head_l1: BlockInfo,
+        safe_l1: BlockInfo,
+        unsafe_l2: HeadInfo,
+        safe_l2: HeadInfo,
+        finalized_l2: HeadInfo,
+        queued_payload: Option<&ExecutionPayload>,
+        engine_sync_target: HeadInfo,
+    ) -> Result<Self> {
+        let queued_unsafe_l2 = match queued_payload {
+            Some(payload) => payload.try_into()?,
+            None => Default::default(),
+        };
+
+        Ok(Self {
+            current_l1,
+            current_l1_finalized: finalized_l1,
+            head_l1,
+            safe_l1,
+            finalized_l1,
+            unsafe_l2,
+            safe_l2,
+            finalized_l2,
+            queued_unsafe_l2,
+            engine_sync_target,
+        })
+    }
+}