diff --git a/Cargo.lock b/Cargo.lock index bb4399d1ae78..fae21fd42b39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -101,6 +101,159 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8367891bf380210abb0d6aa30c5f85a9080cb4a066c4d5c5acadad630823751b" +dependencies = [ + "alloy-consensus", + "alloy-contract", + "alloy-core", + "alloy-eips", + "alloy-genesis", + "alloy-network", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-signer", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", +] + +[[package]] +name = "alloy-chains" +version = "0.1.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470" +dependencies = [ + "alloy-primitives", + "num_enum 0.7.3", + "strum", +] + +[[package]] +name = "alloy-consensus" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eefe64fd344cffa9cf9e3435ec4e93e6e9c3481bc37269af988bf497faf4a6a" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures 0.3.31", + "futures-util", + "thiserror", +] + +[[package]] +name = "alloy-core" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47ef9e96462d0b9fee9008c53c1f3d017b9498fcdef3ad8d728db98afef47955" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85132f2698b520fab3f54beed55a44389f7006a7b557a0261e1e69439dcc1572" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + "const-hex", + "itoa", + "serde", + "serde_json", + "winnow 0.6.20", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "k256 0.13.4", + "serde", +] + +[[package]] +name = "alloy-eips" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "derive_more 1.0.0", 
+ "once_cell", + "serde", + "sha2 0.10.8", +] + +[[package]] +name = "alloy-genesis" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a7a18afb0b318616b6b2b0e2e7ac5529d32a966c673b48091c9919e284e6aca" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-json-abi" version = "0.8.11" @@ -113,6 +266,53 @@ dependencies = [ "serde_json", ] +[[package]] +name = "alloy-json-rpc" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb3705ce7d8602132bcf5ac7a1dd293a42adc2f183abf5907c30ac535ceca049" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "thiserror", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-primitives" version = "0.8.11" @@ -142,16 +342,246 @@ dependencies = [ "tiny-keccak 2.0.2", ] +[[package]] +name = "alloy-provider" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "927f708dd457ed63420400ee5f06945df9632d5d101851952056840426a10dc5" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "async-stream", + "async-trait", + "auto_impl", + "dashmap 6.1.0", + "futures 0.3.31", + "futures-utils-wasm", + "lru", + "pin-project", + "reqwest 0.12.9", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "alloy-pubsub" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d05f63677e210d758cd5d6d1ce10f20c980c3560ccfbe79ba1997791862a04f" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "bimap", + "futures 0.3.31", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.1", + "tracing", +] + [[package]] name = "alloy-rlp" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" dependencies = [ + "alloy-rlp-derive", "arrayvec 0.7.6", "bytes", ] +[[package]] +name = "alloy-rlp-derive" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7d82952dca71173813d4e5733e2c986d8b04aea9e0f3b0a576664c232ad050a5" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-pubsub", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "futures 0.3.31", + "pin-project", + "reqwest 0.12.9", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64333d639f2a0cf73491813c629a405744e16343a4bc5640931be707c345ecc5" +dependencies = [ + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1464c4dd646e1bdfde86ae65ce5ba168dbb29180b478011fe87117ae46b1629b" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "derive_more 1.0.0", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83aa984386deda02482660aa31cb8ca1e63d533f1c31a52d7d181ac5ec68e9b8" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "cfg-if", + "derive_more 1.0.0", + "hashbrown 0.14.5", + "itertools 0.13.0", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve 0.13.8", + "k256 0.13.4", + "thiserror", +] + +[[package]] +name = "alloy-signer-local" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fabe917ab1778e760b4701628d1cae8e028ee9d52ac6307de4e1e9286ab6b5f" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "k256 0.13.4", + "rand 0.8.5", + "thiserror", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.6.0", + "proc-macro-error2", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", + "syn-solidity", + "tiny-keccak 2.0.2", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" +dependencies = [ + 
"alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2 1.0.89", + "quote 1.0.37", + "serde_json", + "syn 2.0.85", + "syn-solidity", +] + [[package]] name = "alloy-sol-type-parser" version = "0.8.11" @@ -162,6 +592,90 @@ dependencies = [ "winnow 0.6.20", ] +[[package]] +name = "alloy-sol-types" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "374d7fb042d68ddfe79ccb23359de3007f6d4d53c13f703b64fb0db422132111" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33616b2edf7454302a1d48084db185e52c309f73f6c10be99b0fe39354b3f1e9" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-http" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a944f5310c690b62bbb3e7e5ce34527cbd36b2d18532a797af123271ce595a49" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.9", + "serde_json", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-ipc" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09fd8491249f74d16ec979b1f5672377b12ebb818e6056478ffa386954dbd350" +dependencies = [ + "alloy-json-rpc", + "alloy-pubsub", + "alloy-transport", + "bytes", + "futures 0.3.31", + "interprocess", + "pin-project", + "serde_json", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "alloy-transport-ws" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9704761f6297fe482276bee7f77a93cb42bd541c2bd6c1c560b6f3a9ece672e" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures 0.3.31", + "http 1.1.0", + "rustls 0.23.16", + "serde_json", + "tokio", + "tokio-tungstenite", + "tracing", + "ws_stream_wasm", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -249,9 +763,9 @@ checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -262,6 +776,39 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash 0.8.11", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.0", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "rayon", + "zeroize", +] + 
[[package]] name = "ark-ff" version = "0.3.0" @@ -300,6 +847,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rayon", + "zeroize", +] + [[package]] name = "ark-ff-asm" version = "0.3.0" @@ -320,6 +888,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "ark-ff-macros" version = "0.3.0" @@ -345,6 +923,35 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash 0.8.11", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.0", + "rayon", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -366,6 +973,31 @@ dependencies = [ "num-bigint 0.4.6", ] +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "num-bigint 0.4.6", + "rayon", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "ark-std" version = "0.3.0" @@ -386,6 +1018,17 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", + "rayon", +] + [[package]] name = "arr_macro" version = "0.1.3" @@ -710,6 +1353,17 @@ dependencies = [ "syn 2.0.85", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures 0.3.31", + "pharos", + "rustc_version 0.4.1", +] + [[package]] name = "atoi" version = "2.0.0" @@ -1001,6 +1655,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bincode" version = "1.3.3" @@ -1436,6 +2096,21 @@ 
dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", +] + [[package]] name = "camino" version = "1.1.9" @@ -2512,9 +3187,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", @@ -2582,6 +3257,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs" version = "5.0.1" @@ -2635,6 +3319,12 @@ dependencies = [ "syn 2.0.85", ] +[[package]] +name = "doctest-file" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" + [[package]] name = "dotenvy" version = "0.15.7" @@ -2744,6 +3434,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "either" version = "1.13.0" @@ -2826,6 +3528,26 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -3484,7 +4206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ "gloo-timers 0.2.6", - "send_wrapper", + "send_wrapper 0.4.0", ] [[package]] @@ -3506,6 +4228,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "generic-array" version = "0.14.7" @@ -3540,6 +4268,26 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "get_all_blobs" +version = "0.1.0" +dependencies = [ + "alloy", + "anyhow", + "axum 0.7.7", + "futures 0.3.31", + "hex", + "kzgpad-rs", + "prost 0.13.3", + "reqwest 0.12.9", + "rlp", + "rustls 0.23.16", + "serde", + "serde_json", + "tokio", + "tonic 0.12.3", +] + [[package]] name = "getrandom" version = "0.2.15" @@ -4156,7 +4904,7 @@ dependencies = [ "hyper 1.5.0", "hyper-util", "log", - "rustls 0.23.18", + "rustls 0.23.16", "rustls-native-certs 0.8.0", 
"rustls-pki-types", "tokio", @@ -4393,6 +5141,21 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "interprocess" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13" +dependencies = [ + "doctest-file", + "futures-core", + "libc", + "recvmsg", + "tokio", + "widestring", + "windows-sys 0.52.0", +] + [[package]] name = "ipnet" version = "2.10.1" @@ -4568,7 +5331,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core 0.23.2", "pin-project", - "rustls 0.23.18", + "rustls 0.23.16", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -4667,7 +5430,7 @@ dependencies = [ "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", - "rustls 0.23.18", + "rustls 0.23.16", "rustls-platform-verifier", "serde", "serde_json", @@ -4838,6 +5601,14 @@ dependencies = [ "log", ] +[[package]] +name = "kzgpad-rs" +version = "0.1.0" +source = "git+https://github.com/Layr-Labs/kzgpad-rs.git?tag=v0.1.0#b5f8c8d3d6482407dc118cb1f51597a017a1cc89" +dependencies = [ + "rand 0.8.5", +] + [[package]] name = "lalrpop" version = "0.20.2" @@ -5769,7 +6540,7 @@ dependencies = [ "snafu", "tokio", "tower 0.5.1", - "tower-http 0.6.1", + "tower-http 0.6.2", "tracing", "url", ] @@ -6183,6 +6954,16 @@ dependencies = [ "indexmap 2.6.0", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures 0.3.31", + "rustc_version 0.4.1", +] + [[package]] name = "phf" version = "0.11.2" @@ -6493,6 +7274,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "proc-macro-hack" version = "0.5.20+deprecated" @@ -6798,7 +7601,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustls 0.23.16", "socket2", "thiserror", "tokio", @@ -6815,7 +7618,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustls 0.23.16", "slab", "thiserror", "tinyvec", @@ -6824,10 +7627,11 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -6974,6 +7778,12 @@ dependencies = [ "rand_core 0.3.1", ] +[[package]] +name = "recvmsg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" + [[package]] name = "redox_syscall" version = "0.5.7" @@ -7118,7 +7928,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.18", + "rustls 0.23.16", "rustls-native-certs 0.8.0", 
"rustls-pemfile 2.2.0", "rustls-pki-types", @@ -7332,6 +8142,32 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" +[[package]] +name = "rust-kzg-bn254" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdae4058a9f604acf7023d99d931d6f30261fff93787bcfd1f1ccfc725b701c" +dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "byteorder", + "crossbeam-channel", + "directories", + "hex-literal", + "num-bigint 0.4.6", + "num-traits", + "num_cpus", + "rand 0.8.5", + "rayon", + "sha2 0.10.8", + "sys-info", + "ureq", +] + [[package]] name = "rust_decimal" version = "1.36.0" @@ -7434,9 +8270,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.18" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "aws-lc-rs", "log", @@ -7521,7 +8357,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.18", + "rustls 0.23.16", "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -7729,6 +8565,15 @@ dependencies = [ "yap", ] +[[package]] +name = "scc" +version = "2.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8d25269dd3a12467afe2e510f69fb0b46b698e5afb296b59f2145259deaf8e8" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.26" @@ -7773,6 +8618,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sdd" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49c1eeaf4b6a87c7479688c6d52b9f1153cedd3c489300564f932b065c6eab95" + [[package]] name = "seahash" version = "4.1.0" @@ -7915,6 +8766,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "sentry" version = "0.31.8" @@ -8169,6 +9026,31 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serial_test" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +dependencies = [ + "futures 0.3.31", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -9197,6 +10079,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" +dependencies = [ + "paste", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "syn_derive" version = "0.1.8" @@ -9224,6 +10118,16 @@ 
dependencies = [ "futures-core", ] +[[package]] +name = "sys-info" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b3a0d0aba8bf96a0e1ddfdc352fc53b3df7f39318c71854910c3c4b024ae52c" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -9620,7 +10524,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.18", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -9649,6 +10553,22 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" +dependencies = [ + "futures-util", + "log", + "rustls 0.23.16", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tungstenite", + "webpki-roots", +] + [[package]] name = "tokio-util" version = "0.7.12" @@ -9760,8 +10680,11 @@ dependencies = [ "percent-encoding", "pin-project", "prost 0.13.3", + "rustls-native-certs 0.8.0", + "rustls-pemfile 2.2.0", "socket2", "tokio", + "tokio-rustls 0.26.0", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -9828,9 +10751,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "bitflags 2.6.0", "bytes", @@ -9978,6 +10901,26 @@ dependencies = [ "toml", ] +[[package]] +name = "tungstenite" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.23.16", + "rustls-pki-types", + "sha1", + "thiserror", + "utf-8", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -10142,10 +11085,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ "base64 0.22.1", + "flate2", "log", "native-tls", "once_cell", + "rustls 0.23.16", + "rustls-pki-types", "url", + "webpki-roots", ] [[package]] @@ -10166,6 +11113,12 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.2" @@ -10498,6 +11451,12 @@ dependencies = [ "wasite", ] +[[package]] +name = "widestring" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" + [[package]] name = "winapi" version = "0.3.9" @@ -10744,6 +11703,25 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +dependencies = [ + 
"async_io_stream", + "futures 0.3.31", + "js-sys", + "log", + "pharos", + "rustc_version 0.4.1", + "send_wrapper 0.6.0", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.5.1" @@ -11494,7 +12472,7 @@ dependencies = [ "octocrab", "regex", "reqwest 0.12.9", - "rustls 0.23.18", + "rustls 0.23.16", "semver 1.0.23", "serde", "serde_json", @@ -11583,6 +12561,7 @@ name = "zksync_da_clients" version = "0.1.0" dependencies = [ "anyhow", + "ark-bn254", "async-trait", "backon", "base58", @@ -11592,33 +12571,43 @@ dependencies = [ "blake2b_simd", "bytes", "celestia-types", + "ethabi", "flate2", "futures 0.3.31", "hex", "http 1.1.0", "jsonrpsee 0.23.2", + "num-bigint 0.4.6", "parity-scale-codec", "pbjson-types", "prost 0.12.6", + "rand 0.8.5", "reqwest 0.12.9", "ripemd", + "rust-kzg-bn254", "scale-encode", "secp256k1", "serde", "serde_json", + "serial_test", "sha2 0.10.8", + "sha3 0.10.8", "subxt-metadata", "subxt-signer", + "tiny-keccak 2.0.2", "tokio", "tokio-stream", "tonic 0.11.0", "tracing", + "url", "zksync_basic_types", "zksync_config", "zksync_da_client", "zksync_env_config", + "zksync_eth_client", "zksync_object_store", "zksync_types", + "zksync_web3_decl", ] [[package]] @@ -12977,7 +13966,7 @@ dependencies = [ "pin-project-lite", "rand 0.8.5", "rlp", - "rustls 0.23.18", + "rustls 0.23.16", "serde", "serde_json", "test-casing", diff --git a/Cargo.toml b/Cargo.toml index 80a1e4104265..6093d88442d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,7 @@ members = [ # Test infrastructure "core/tests/loadnext", "core/tests/vm-benchmark", + "get_all_blobs", ] resolver = "2" @@ -222,6 +223,10 @@ pbjson-types = "0.6.0" # Eigen tokio-stream = "0.1.16" +rust-kzg-bn254 = "0.2.1" +ark-bn254 = "0.5.0" +num-bigint = "0.4.6" +serial_test = "3.1.1" # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. diff --git a/contracts b/contracts index 46d75088e7dd..64ed0ab97ff4 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 46d75088e7ddb534101874c3ec15b877da1cb417 +Subproject commit 64ed0ab97ff4e9d2a265522025bdb8e1a4a4d2eb diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs index f2c05a0f61ef..68b1390d980a 100644 --- a/core/lib/config/src/configs/da_client/eigen.rs +++ b/core/lib/config/src/configs/da_client/eigen.rs @@ -1,10 +1,27 @@ use serde::Deserialize; use zksync_basic_types::secrets::PrivateKey; - -#[derive(Clone, Debug, Default, PartialEq, Deserialize)] +/// Configuration for the EigenDA remote disperser client. 
+#[derive(Clone, Debug, PartialEq, Deserialize, Default)]
 pub struct EigenConfig {
-    pub rpc_node_url: String,
-    pub inclusion_polling_interval_ms: u64,
+    /// URL of the Disperser RPC server
+    pub disperser_rpc: String,
+    /// Block height that must be reached for the blob to be considered finalized;
+    /// a value less than or equal to 0 means the disperser will not wait for finalization
+    pub settlement_layer_confirmation_depth: i32,
+    /// URL of the Ethereum RPC server
+    pub eigenda_eth_rpc: String,
+    /// Address of the service manager contract
+    pub eigenda_svc_manager_address: String,
+    /// Wait for the blob to be finalized before returning the response
+    pub wait_for_finalization: bool,
+    /// Authenticated dispersal
+    pub authenticated: bool,
+    /// URL of the file containing the G1 point used for KZG
+    pub g1_url: String,
+    /// URL of the file containing the G2 point used for KZG
+    pub g2_url: String,
+    /// Chain ID of the Ethereum network
+    pub chain_id: u64,
 }
 
 #[derive(Clone, Debug, PartialEq)]
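For context, a minimal sketch of how the new EigenConfig might be populated; every concrete value below (endpoints, address, chain ID) is an illustrative placeholder, not a default shipped with this change:

    // Illustrative values only; field names mirror the EigenConfig struct above.
    let eigen_config = EigenConfig {
        disperser_rpc: "https://disperser.example.com:443".to_string(),
        // a value <= 0 means the disperser will not wait for finalization
        settlement_layer_confirmation_depth: 0,
        eigenda_eth_rpc: "https://eth-rpc.example.com".to_string(),
        eigenda_svc_manager_address: "0x0000000000000000000000000000000000000000".to_string(),
        wait_for_finalization: false,
        authenticated: false,
        g1_url: "https://example.com/kzg/g1.point".to_string(),
        g2_url: "https://example.com/kzg/g2.point".to_string(),
        chain_id: 17000, // e.g. the Holesky testnet chain ID
    };
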
diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs
index e9ad6bd3c074..c8bf1b3b8995 100644
--- a/core/lib/config/src/configs/da_dispatcher.rs
+++ b/core/lib/config/src/configs/da_dispatcher.rs
@@ -6,6 +6,7 @@ pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000;
 pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100;
 pub const DEFAULT_MAX_RETRIES: u16 = 5;
 pub const DEFAULT_USE_DUMMY_INCLUSION_DATA: bool = false;
+pub const DEFAULT_MAX_CONCURRENT_REQUESTS: u32 = 100;
 
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 pub struct DADispatcherConfig {
@@ -19,6 +20,8 @@ pub struct DADispatcherConfig {
     // TODO: run a verification task to check if the L1 contract expects the inclusion proofs to
     // avoid the scenario where contracts expect real proofs, and server is using dummy proofs.
     pub use_dummy_inclusion_data: Option<bool>,
+    /// The maximum number of concurrent requests to send to the DA server.
+    pub max_concurrent_requests: Option<u32>,
 }
 
 impl DADispatcherConfig {
@@ -28,6 +31,7 @@ impl DADispatcherConfig {
             max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH),
             max_retries: Some(DEFAULT_MAX_RETRIES),
             use_dummy_inclusion_data: Some(DEFAULT_USE_DUMMY_INCLUSION_DATA),
+            max_concurrent_requests: Some(DEFAULT_MAX_CONCURRENT_REQUESTS),
         }
     }
 
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 1a3f63d9b278..5464d82d1ef2 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -972,6 +972,7 @@ impl Distribution for EncodeDist {
             max_rows_to_dispatch: self.sample(rng),
             max_retries: self.sample(rng),
             use_dummy_inclusion_data: self.sample(rng),
+            max_concurrent_requests: self.sample(rng),
         }
     }
 }
diff --git a/core/lib/dal/.sqlx/query-0fdfa0f31142899f3d5f808688d76ec553688e69dfd330ca408505b8b2cdee5e.json b/core/lib/dal/.sqlx/query-0fdfa0f31142899f3d5f808688d76ec553688e69dfd330ca408505b8b2cdee5e.json new file mode 100644 index 000000000000..355f9993264f --- /dev/null +++ b/core/lib/dal/.sqlx/query-0fdfa0f31142899f3d5f808688d76ec553688e69dfd330ca408505b8b2cdee5e.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number,\n blob_id,\n inclusion_data,\n sent_at\n FROM\n data_availability\n WHERE\n inclusion_data IS NULL\n ORDER BY\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "blob_id", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "inclusion_data", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "sent_at", + "type_info": "Timestamp" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + false + ] + }, + "hash": "0fdfa0f31142899f3d5f808688d76ec553688e69dfd330ca408505b8b2cdee5e" } diff --git a/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json b/core/lib/dal/.sqlx/query-1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88.json similarity index 88% rename from core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json rename to core/lib/dal/.sqlx/query-1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88.json index f4e08abe31c5..294799d4906c 100644 --- a/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json +++ b/core/lib/dal/.sqlx/query-1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", + "query": "\n SELECT\n number,\n 
timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -201,8 +206,9 @@ true, true, true, - true + true, + false ] }, - "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" + "hash": "1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88" } diff --git a/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json b/core/lib/dal/.sqlx/query-47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75.json similarity index 80% rename from core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json rename to core/lib/dal/.sqlx/query-47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75.json index 9a93ba45978e..64dbd1dcd019 100644 --- a/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json +++ b/core/lib/dal/.sqlx/query-47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n 
bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -202,8 +207,9 @@ true, true, true, - true + true, + false ] }, - "hash": "a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" + "hash": "47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75" } diff --git a/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json b/core/lib/dal/.sqlx/query-57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583.json similarity index 85% rename from core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json rename to core/lib/dal/.sqlx/query-57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583.json index f97ea8a6ccd5..f310b82954da 100644 --- a/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json +++ b/core/lib/dal/.sqlx/query-57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n 
events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -201,8 +206,9 @@ true, true, true, - true + true, + false ] }, - "hash": "c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" + "hash": "57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583" } diff --git a/core/lib/dal/.sqlx/query-58b7087232e32355241003b7e66eff752996a9d98872de28ca7f686729528552.json b/core/lib/dal/.sqlx/query-58b7087232e32355241003b7e66eff752996a9d98872de28ca7f686729528552.json new file mode 100644 index 000000000000..e6e73498fbc2 --- /dev/null +++ b/core/lib/dal/.sqlx/query-58b7087232e32355241003b7e66eff752996a9d98872de28ca7f686729528552.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number != 0\n AND data_availability.blob_id = $1\n AND pubdata_input IS NOT NULL\n ORDER BY\n number\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "58b7087232e32355241003b7e66eff752996a9d98872de28ca7f686729528552" +} diff --git a/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json b/core/lib/dal/.sqlx/query-6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575.json similarity index 86% rename from core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json rename to core/lib/dal/.sqlx/query-6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575.json index 48adcd412676..2dd50bd6b4d9 100644 --- a/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json +++ b/core/lib/dal/.sqlx/query-6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n 
data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -199,8 +204,9 @@ true, true, true, - true + true, + false ] }, - "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" + "hash": "6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575" } diff --git a/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json b/core/lib/dal/.sqlx/query-8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3.json similarity index 81% rename from core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json rename to core/lib/dal/.sqlx/query-8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3.json index 8a68b1a9b9bd..b95fb8c82321 100644 --- a/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json +++ b/core/lib/dal/.sqlx/query-8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND 
protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -204,8 +209,9 @@ true, true, true, - true + true, + false ] }, - "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" + "hash": "8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3" } diff --git a/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json b/core/lib/dal/.sqlx/query-96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef.json similarity index 94% rename from core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json rename to core/lib/dal/.sqlx/query-96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef.json index 66d3e18075bf..e45f0ceb6ef9 100644 --- a/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json +++ b/core/lib/dal/.sqlx/query-96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON 
data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -203,8 +208,9 @@ true, true, true, - true + true, + false ] }, - "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" + "hash": "96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef" } diff --git a/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json b/core/lib/dal/.sqlx/query-af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a.json similarity index 78% rename from core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json rename to core/lib/dal/.sqlx/query-af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a.json index 11bff1102932..63b5a6501105 100644 --- a/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json +++ b/core/lib/dal/.sqlx/query-af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS 
NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -205,8 +210,9 @@ true, true, true, - true + true, + false ] }, - "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" + "hash": "af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a" } diff --git a/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json b/core/lib/dal/.sqlx/query-fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05.json similarity index 86% rename from core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json rename to core/lib/dal/.sqlx/query-fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05.json index dfdb4b6c82e7..e2c6df469102 100644 --- a/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json +++ b/core/lib/dal/.sqlx/query-fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n 
local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -201,8 +206,9 @@ true, true, true, - true + true, + false ] }, - "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" + "hash": "fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05" } diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 5002c5a8afbf..3c128458c87f 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -348,7 +348,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1213,7 +1214,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1408,7 +1410,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1497,7 +1500,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM ( SELECT @@ -1577,7 +1581,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" 
FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1713,7 +1718,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1786,7 +1792,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1873,7 +1880,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 41dd7efe2732..3815d65f20b4 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -175,6 +175,45 @@ impl DataAvailabilityDal<'_, '_> { .map(DataAvailabilityBlob::from)) } + pub async fn get_da_blob_ids_awaiting_inclusion( + &mut self, + ) -> DalResult>> { + let rows = sqlx::query!( + r#" + SELECT + l1_batch_number, + blob_id, + inclusion_data, + sent_at + FROM + data_availability + WHERE + inclusion_data IS NULL + ORDER BY + l1_batch_number + "#, + ) + .instrument("get_da_blobs_awaiting_inclusion") + .fetch_all(self.storage) + .await?; + + Ok(rows + .into_iter() + .map(|row| { + let l1_batch_number_u32 = row.l1_batch_number.try_into(); + if let Ok(l1_batch_number) = l1_batch_number_u32 { + Some(DataAvailabilityBlob { + l1_batch_number: L1BatchNumber(l1_batch_number), + blob_id: row.blob_id, + inclusion_data: row.inclusion_data, + sent_at: row.sent_at.and_utc(), + }) + } else { + None + } + }) + .collect()) + } /// Fetches the pubdata and `l1_batch_number` for the L1 batches that are ready for DA dispatch. pub async fn get_ready_for_da_dispatch_l1_batches( &mut self, @@ -216,4 +255,43 @@ impl DataAvailabilityDal<'_, '_> { }) .collect()) } + + /// Fetches the pubdata for the L1 batch with a given blob id. + pub async fn get_blob_data_by_blob_id( + &mut self, + blob_id: &str, + ) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT + number, + pubdata_input + FROM + l1_batches + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number + WHERE + number != 0 + AND data_availability.blob_id = $1 + AND pubdata_input IS NOT NULL + ORDER BY + number + LIMIT + 1 + "#, + blob_id, + ) + .instrument("get_blob_data_by_blob_id") + .with_arg("blob_id", &blob_id) + .fetch_optional(self.storage) + .await? 
+ .map(|row| L1BatchDA { + // `unwrap` is safe here because we have a `WHERE` clause that filters out `NULL` values + pubdata: row.pubdata_input.unwrap(), + l1_batch_number: L1BatchNumber(row.number as u32), + }); + + Ok(row) + } } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 54635932a1af..81420937fdad 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -159,6 +159,7 @@ pub(crate) struct StorageL1Batch { pub local_root: Option>, pub state_diff_hash: Option>, pub inclusion_data: Option>, + pub blob_id: Option, } impl StorageL1Batch { @@ -271,6 +272,7 @@ impl TryFrom for L1BatchMetadata { local_root: batch.local_root.map(|v| H256::from_slice(&v)), aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), da_inclusion_data: batch.inclusion_data, + da_blob_id: batch.blob_id.map(|s| s.into_bytes()), }) } } diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 8ceeb215faf4..e867461b3c2a 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -1,17 +1,21 @@ use std::env; -use zksync_config::configs::{ - da_client::{ - avail::{ - AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, +use zksync_config::{ + configs::{ + da_client::{ + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, + AVAIL_GAS_RELAY_CLIENT_NAME, + }, + celestia::CelestiaSecrets, + eigen::EigenSecrets, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, + EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, - celestia::CelestiaSecrets, - eigen::EigenSecrets, - DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, - EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, + secrets::DataAvailabilitySecrets, + AvailConfig, }, - secrets::DataAvailabilitySecrets, - AvailConfig, + EigenConfig, }; use crate::{envy_load, FromEnv}; @@ -34,7 +38,20 @@ impl FromEnv for DAClientConfig { }, }), CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?), - EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(envy_load("da_eigen_config", "DA_")?), + EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(EigenConfig { + disperser_rpc: env::var("EIGENDA_DISPERSER_RPC")?, + settlement_layer_confirmation_depth: env::var( + "EIGENDA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH", + )? + .parse()?, + eigenda_eth_rpc: env::var("EIGENDA_EIGENDA_ETH_RPC")?, + eigenda_svc_manager_address: env::var("EIGENDA_EIGENDA_SVC_MANAGER_ADDRESS")?, + wait_for_finalization: env::var("EIGENDA_WAIT_FOR_FINALIZATION")?.parse()?, + authenticated: env::var("EIGENDA_AUTHENTICATED")?.parse()?, + g1_url: env::var("EIGENDA_G1_URL")?.parse()?, + g2_url: env::var("EIGENDA_G2_URL")?.parse()?, + chain_id: env::var("EIGENDA_CHAIN_ID")?.parse()?, + }), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) 
} @@ -248,8 +265,15 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Eigen" - DA_RPC_NODE_URL="localhost:12345" - DA_INCLUSION_POLLING_INTERVAL_MS="1000" + EIGENDA_DISPERSER_RPC="http://localhost:8080" + EIGENDA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH=0 + EIGENDA_EIGENDA_ETH_RPC="http://localhost:8545" + EIGENDA_EIGENDA_SVC_MANAGER_ADDRESS="0x123" + EIGENDA_WAIT_FOR_FINALIZATION=true + EIGENDA_AUTHENTICATED=false + EIGENDA_G1_URL="resources1" + EIGENDA_G2_URL="resources2" + EIGENDA_CHAIN_ID=1 "#; lock.set_env(config); @@ -257,8 +281,15 @@ mod tests { assert_eq!( actual, DAClientConfig::Eigen(EigenConfig { - rpc_node_url: "localhost:12345".to_string(), - inclusion_polling_interval_ms: 1000, + disperser_rpc: "http://localhost:8080".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: "http://localhost:8545".to_string(), + eigenda_svc_manager_address: "0x123".to_string(), + wait_for_finalization: true, + authenticated: false, + g1_url: "resources1".to_string(), + g2_url: "resources2".to_string(), + chain_id: 1 }) ); } diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 246752db91ac..805e6b2234b5 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -21,12 +21,14 @@ mod tests { interval: u32, rows_limit: u32, max_retries: u16, + max_concurrent_requests: u32, ) -> DADispatcherConfig { DADispatcherConfig { polling_interval_ms: Some(interval), max_rows_to_dispatch: Some(rows_limit), max_retries: Some(max_retries), use_dummy_inclusion_data: Some(true), + max_concurrent_requests: Some(max_concurrent_requests), } } @@ -38,9 +40,10 @@ mod tests { DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60 DA_DISPATCHER_MAX_RETRIES=7 DA_DISPATCHER_USE_DUMMY_INCLUSION_DATA="true" + DA_DISPATCHER_MAX_CONCURRENT_REQUESTS=10 "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); - assert_eq!(actual, expected_da_layer_config(5000, 60, 7)); + assert_eq!(actual, expected_da_layer_config(5000, 60, 7, 10)); } } diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index 325288056b35..f7c45e98500e 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -1,4 +1,3 @@ -use anyhow::Context as _; use serde::de::DeserializeOwned; mod api; @@ -44,5 +43,5 @@ pub trait FromEnv: Sized { pub fn envy_load(name: &str, prefix: &str) -> anyhow::Result { envy::prefixed(prefix) .from_env() - .with_context(|| format!("Cannot load config <{name}>")) + .map_err(|e| anyhow::anyhow!("Failed to load {} from env: {}", name, e)) } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 573b3c65a3e3..4b98e7af1bb6 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -230,7 +230,16 @@ impl Tokenizable for CommitBatchInfo<'_> { panic!("Custom pubdata DA is incompatible with Rollup mode") } (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { - vec![PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY] + let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY]; + operator_da_input.extend( + &self + .l1_batch_with_metadata + .metadata + .da_blob_id + .clone() + .unwrap_or_default(), + ); + operator_da_input } ( L1BatchCommitmentMode::Rollup, diff --git a/core/lib/protobuf_config/src/da_client.rs 
b/core/lib/protobuf_config/src/da_client.rs index 341a6a9e4f43..bd817826de7f 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -10,7 +10,10 @@ use zksync_config::configs::{ }; use zksync_protobuf::{required, ProtoRepr}; -use crate::proto::{da_client as proto, object_store as object_store_proto}; +use crate::proto::{ + da_client::{self as proto}, + object_store as object_store_proto, +}; impl ProtoRepr for proto::DataAvailabilityClient { type Type = configs::DAClientConfig; @@ -53,11 +56,25 @@ impl ProtoRepr for proto::DataAvailabilityClient { timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")?, }), proto::data_availability_client::Config::Eigen(conf) => Eigen(EigenConfig { - rpc_node_url: required(&conf.rpc_node_url) - .context("rpc_node_url")? + disperser_rpc: required(&conf.disperser_rpc) + .context("disperser_rpc")? + .clone(), + settlement_layer_confirmation_depth: *required( + &conf.settlement_layer_confirmation_depth, + ) + .context("settlement_layer_confirmation_depth")?, + eigenda_eth_rpc: required(&conf.eigenda_eth_rpc) + .context("eigenda_eth_rpc")? + .clone(), + eigenda_svc_manager_address: required(&conf.eigenda_svc_manager_address) + .context("eigenda_svc_manager_address")? .clone(), - inclusion_polling_interval_ms: *required(&conf.inclusion_polling_interval_ms) - .context("inclusion_polling_interval_ms")?, + wait_for_finalization: *required(&conf.wait_for_finalization) + .context("wait_for_finalization")?, + authenticated: *required(&conf.authenticated).context("authenticated")?, + g1_url: required(&conf.g1_url).context("g1_url")?.clone(), + g2_url: required(&conf.g2_url).context("g2_url")?.clone(), + chain_id: *required(&conf.chain_id).context("chain_id")?, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) 
@@ -96,8 +113,17 @@ impl ProtoRepr for proto::DataAvailabilityClient { }) } Eigen(config) => proto::data_availability_client::Config::Eigen(proto::EigenConfig { - rpc_node_url: Some(config.rpc_node_url.clone()), - inclusion_polling_interval_ms: Some(config.inclusion_polling_interval_ms), + disperser_rpc: Some(config.disperser_rpc.clone()), + settlement_layer_confirmation_depth: Some( + config.settlement_layer_confirmation_depth, + ), + eigenda_eth_rpc: Some(config.eigenda_eth_rpc.clone()), + eigenda_svc_manager_address: Some(config.eigenda_svc_manager_address.clone()), + wait_for_finalization: Some(config.wait_for_finalization), + authenticated: Some(config.authenticated), + g1_url: Some(config.g1_url.clone()), + g2_url: Some(config.g2_url.clone()), + chain_id: Some(config.chain_id), }), ObjectStore(config) => proto::data_availability_client::Config::ObjectStore( object_store_proto::ObjectStore::build(config), diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index d77073bd32cf..e85ff5ae76ed 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -12,6 +12,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| x as u16), use_dummy_inclusion_data: self.use_dummy_inclusion_data, + max_concurrent_requests: self.max_concurrent_requests, }) } @@ -21,6 +22,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(Into::into), use_dummy_inclusion_data: this.use_dummy_inclusion_data, + max_concurrent_requests: this.max_concurrent_requests, } } } diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index 0a302120d775..dd44d0ad14d1 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -37,8 +37,17 @@ message CelestiaConfig { } message EigenConfig { - optional string rpc_node_url = 1; - optional uint64 inclusion_polling_interval_ms = 2; + optional string disperser_rpc = 3; + optional int32 settlement_layer_confirmation_depth = 4; + optional string eigenda_eth_rpc = 5; + optional string eigenda_svc_manager_address = 6; + optional bool wait_for_finalization = 7; + optional bool authenticated = 8; + optional string g1_url = 9; + optional string g2_url = 10; + optional uint64 chain_id = 11; + reserved 1,2; + reserved "rpc_node_url","inclusion_polling_interval_ms"; } message DataAvailabilityClient { diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index dd366bd5b925..d6329d14b281 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -7,4 +7,5 @@ message DataAvailabilityDispatcher { optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; optional bool use_dummy_inclusion_data = 4; + optional uint32 max_concurrent_requests = 5; } diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 786ce03e671d..5fd030ec63c6 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -129,6 +129,8 @@ pub struct L1BatchMetadata { pub aggregation_root: Option, /// Data Availability inclusion proof, that has 
to be verified on the settlement layer. pub da_inclusion_data: Option<Vec<u8>>, + /// Data Availability blob id, persisted in L1 so it can be used for chain reconstruction. + pub da_blob_id: Option<Vec<u8>>, } impl L1BatchMetadata { diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index e0c85b3030ab..f12042b12576 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -55,3 +55,14 @@ pbjson-types.workspace = true # Eigen dependencies tokio-stream.workspace = true +rand.workspace = true +sha3.workspace = true +tiny-keccak.workspace = true +ethabi.workspace = true +rust-kzg-bn254.workspace = true +ark-bn254.workspace = true +num-bigint.workspace = true +serial_test.workspace = true +zksync_web3_decl.workspace = true +zksync_eth_client.workspace = true +url.workspace = true diff --git a/core/node/da_clients/src/eigen/README.md b/core/node/da_clients/src/eigen/README.md index 634b4eb58780..ae2398088dc0 100644 --- a/core/node/da_clients/src/eigen/README.md +++ b/core/node/da_clients/src/eigen/README.md @@ -1,11 +1,13 @@ -# EigenDA client +# EigenDA Client ---- +EigenDA is a high-throughput data availability layer for rollups. It is an EigenLayer AVS (Actively Validated +Service), so it leverages Ethereum's economic security instead of bootstrapping a new network with its own validators. +For more information, see the [docs](https://docs.eigenda.xyz/). -This is an implementation of the EigenDA client capable of sending the blobs to DA layer. It uses authenticated -requests, though the auth headers are kind of mocked in the current API implementation. +## Temporary -The generated files are received by compiling the `.proto` files from EigenDA repo using the following function: +To implement the integration, we generated `.proto` files from the EigenDA repo and compiled them with the +following function: ```rust pub fn compile_protos() { @@ -28,8 +30,5 @@ pub fn compile_protos() { } ``` -proto files are not included here to not create confusion in case they are not updated in time, so the EigenDA -[repo](https://github.com/Layr-Labs/eigenda/tree/master/api/proto) has to be a source of truth for the proto files. - -The generated folder here is considered a temporary solution until the EigenDA has a library with either a protogen, or +The generated folder is considered a temporary solution until EigenDA has a library with either a protogen, or preferably a full Rust client implementation. 
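For orientation, here is a minimal usage sketch of the reworked `EigenClient` added below in `client.rs`: construct the client from `EigenConfig`/`EigenSecrets` plus a `GetBlobData` implementation, dispatch a blob, then poll for its inclusion data. This is an illustration, not part of the change itself: `NoopBlobData` and `demo` are hypothetical names, the import paths assume the `zksync_da_clients` crate layout, and a production `GetBlobData` would read pubdata from the database (cf. `get_blob_data_by_blob_id` above).

```rust
use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig};
use zksync_da_client::DataAvailabilityClient;
use zksync_da_clients::eigen::{EigenClient, GetBlobData};

// Hypothetical stand-in: a real implementation would return the locally stored
// pubdata so the client can cross-check it against what the disperser serves.
#[derive(Debug, Clone)]
struct NoopBlobData;

#[async_trait::async_trait]
impl GetBlobData for NoopBlobData {
    async fn call(&self, _blob_id: &str) -> anyhow::Result<Option<Vec<u8>>> {
        Ok(None) // no local copy available; the cross-check is skipped
    }
}

async fn demo(config: EigenConfig, secrets: EigenSecrets) -> anyhow::Result<()> {
    let client = EigenClient::new(config, secrets, Box::new(NoopBlobData)).await?;

    // Dispatch returns immediately with the hex-encoded disperser request id;
    // the first argument (the batch number) is ignored by this client.
    let response = client
        .dispatch_blob(42, vec![1; 20])
        .await
        .map_err(|err| err.error)?;

    // Inclusion data is fetched separately: `None` means the blob is not yet
    // confirmed/finalized on the settlement layer, so the caller retries later.
    match client
        .get_inclusion_data(&response.blob_id)
        .await
        .map_err(|err| err.error)?
    {
        Some(inclusion) => println!("inclusion proof: {} bytes", inclusion.data.len()),
        None => println!("blob not yet included"),
    }
    Ok(())
}
```

This mirrors the flow exercised by the `client_tests.rs` cases further down, which use a `MockGetBlobData` that likewise returns `Ok(None)`.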
diff --git a/core/node/da_clients/src/eigen/blob_info.rs b/core/node/da_clients/src/eigen/blob_info.rs new file mode 100644 index 000000000000..a44117fd4ed7 --- /dev/null +++ b/core/node/da_clients/src/eigen/blob_info.rs @@ -0,0 +1,266 @@ +use std::fmt; + +use super::{ + common::G1Commitment as DisperserG1Commitment, + disperser::{ + BatchHeader as DisperserBatchHeader, BatchMetadata as DisperserBatchMetadata, + BlobHeader as DisperserBlobHeader, BlobInfo as DisperserBlobInfo, + BlobQuorumParam as DisperserBlobQuorumParam, + BlobVerificationProof as DisperserBlobVerificationProof, + }, +}; + +#[derive(Debug)] +pub enum ConversionError { + NotPresentError, +} + +impl fmt::Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConversionError::NotPresentError => write!(f, "Failed to convert BlobInfo"), + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct G1Commitment { + pub x: Vec, + pub y: Vec, +} + +impl G1Commitment { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(&self.x.len().to_be_bytes()); + bytes.extend(&self.x); + bytes.extend(&self.y.len().to_be_bytes()); + bytes.extend(&self.y); + + bytes + } +} + +impl From for G1Commitment { + fn from(value: DisperserG1Commitment) -> Self { + Self { + x: value.x, + y: value.y, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobQuorumParam { + pub quorum_number: u32, + pub adversary_threshold_percentage: u32, + pub confirmation_threshold_percentage: u32, + pub chunk_length: u32, +} + +impl BlobQuorumParam { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(&self.quorum_number.to_be_bytes()); + bytes.extend(&self.adversary_threshold_percentage.to_be_bytes()); + bytes.extend(&self.confirmation_threshold_percentage.to_be_bytes()); + bytes.extend(&self.chunk_length.to_be_bytes()); + + bytes + } +} + +impl From for BlobQuorumParam { + fn from(value: DisperserBlobQuorumParam) -> Self { + Self { + quorum_number: value.quorum_number, + adversary_threshold_percentage: value.adversary_threshold_percentage, + confirmation_threshold_percentage: value.confirmation_threshold_percentage, + chunk_length: value.chunk_length, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobHeader { + pub commitment: G1Commitment, + pub data_length: u32, + pub blob_quorum_params: Vec, +} + +impl BlobHeader { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(self.commitment.to_bytes()); + bytes.extend(&self.data_length.to_be_bytes()); + bytes.extend(&self.blob_quorum_params.len().to_be_bytes()); + + for quorum in &self.blob_quorum_params { + bytes.extend(quorum.to_bytes()); + } + + bytes + } +} + +impl TryFrom for BlobHeader { + type Error = ConversionError; + fn try_from(value: DisperserBlobHeader) -> Result { + if value.commitment.is_none() { + return Err(ConversionError::NotPresentError); + } + let blob_quorum_params: Vec = value + .blob_quorum_params + .iter() + .map(|param| BlobQuorumParam::from(param.clone())) + .collect(); + Ok(Self { + commitment: G1Commitment::from(value.commitment.unwrap()), + data_length: value.data_length, + blob_quorum_params, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BatchHeader { + pub batch_root: Vec, + pub quorum_numbers: Vec, + pub quorum_signed_percentages: Vec, + pub reference_block_number: u32, +} + +impl BatchHeader { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(&self.batch_root.len().to_be_bytes()); + 
bytes.extend(&self.batch_root); + bytes.extend(&self.quorum_numbers.len().to_be_bytes()); + bytes.extend(&self.quorum_numbers); + bytes.extend(&self.quorum_signed_percentages.len().to_be_bytes()); + bytes.extend(&self.quorum_signed_percentages); + bytes.extend(&self.reference_block_number.to_be_bytes()); + + bytes + } +} + +impl From for BatchHeader { + fn from(value: DisperserBatchHeader) -> Self { + Self { + batch_root: value.batch_root, + quorum_numbers: value.quorum_numbers, + quorum_signed_percentages: value.quorum_signed_percentages, + reference_block_number: value.reference_block_number, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BatchMetadata { + pub batch_header: BatchHeader, + pub signatory_record_hash: Vec, + pub fee: Vec, + pub confirmation_block_number: u32, + pub batch_header_hash: Vec, +} + +impl BatchMetadata { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(self.batch_header.to_bytes()); + bytes.extend(&self.signatory_record_hash); + bytes.extend(&self.confirmation_block_number.to_be_bytes()); + + bytes + } +} + +impl TryFrom for BatchMetadata { + type Error = ConversionError; + fn try_from(value: DisperserBatchMetadata) -> Result { + if value.batch_header.is_none() { + return Err(ConversionError::NotPresentError); + } + Ok(Self { + batch_header: BatchHeader::from(value.batch_header.unwrap()), + signatory_record_hash: value.signatory_record_hash, + fee: value.fee, + confirmation_block_number: value.confirmation_block_number, + batch_header_hash: value.batch_header_hash, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobVerificationProof { + pub batch_id: u32, + pub blob_index: u32, + pub batch_medatada: BatchMetadata, + pub inclusion_proof: Vec, + pub quorum_indexes: Vec, +} + +impl BlobVerificationProof { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(&self.batch_id.to_be_bytes()); + bytes.extend(&self.blob_index.to_be_bytes()); + bytes.extend(self.batch_medatada.to_bytes()); + bytes.extend(&self.inclusion_proof.len().to_be_bytes()); + bytes.extend(&self.inclusion_proof); + bytes.extend(&self.quorum_indexes.len().to_be_bytes()); + bytes.extend(&self.quorum_indexes); + + bytes + } +} + +impl TryFrom for BlobVerificationProof { + type Error = ConversionError; + fn try_from(value: DisperserBlobVerificationProof) -> Result { + if value.batch_metadata.is_none() { + return Err(ConversionError::NotPresentError); + } + Ok(Self { + batch_id: value.batch_id, + blob_index: value.blob_index, + batch_medatada: BatchMetadata::try_from(value.batch_metadata.unwrap())?, + inclusion_proof: value.inclusion_proof, + quorum_indexes: value.quorum_indexes, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobInfo { + pub blob_header: BlobHeader, + pub blob_verification_proof: BlobVerificationProof, +} + +impl BlobInfo { + pub fn to_bytes(&self) -> Vec { + let mut bytes = vec![]; + let blob_header_bytes = self.blob_header.to_bytes(); + bytes.extend(blob_header_bytes.len().to_be_bytes()); + bytes.extend(blob_header_bytes); + let blob_verification_proof_bytes = self.blob_verification_proof.to_bytes(); + bytes.extend(blob_verification_proof_bytes); + bytes + } +} + +impl TryFrom for BlobInfo { + type Error = ConversionError; + fn try_from(value: DisperserBlobInfo) -> Result { + if value.blob_header.is_none() || value.blob_verification_proof.is_none() { + return Err(ConversionError::NotPresentError); + } + Ok(Self { + blob_header: BlobHeader::try_from(value.blob_header.unwrap())?, + 
blob_verification_proof: BlobVerificationProof::try_from( + value.blob_verification_proof.unwrap(), + )?, + }) + } +} diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs index d977620526aa..430b5bb4c4a7 100644 --- a/core/node/da_clients/src/eigen/client.rs +++ b/core/node/da_clients/src/eigen/client.rs @@ -10,33 +10,37 @@ use zksync_da_client::{ }; use super::sdk::RawEigenClient; -use crate::utils::to_non_retriable_da_error; +use crate::utils::to_retriable_da_error; +#[async_trait] +pub trait GetBlobData: Clone + std::fmt::Debug + Send + Sync { + async fn call(&self, input: &str) -> anyhow::Result>>; +} + +/// EigenClient is a client for the Eigen DA service. #[derive(Debug, Clone)] -pub struct EigenClient { - client: Arc, +pub struct EigenClient { + pub(crate) client: Arc>, } -impl EigenClient { - pub async fn new(config: EigenConfig, secrets: EigenSecrets) -> anyhow::Result { +impl EigenClient { + pub async fn new( + config: EigenConfig, + secrets: EigenSecrets, + get_blob_data: Box, + ) -> anyhow::Result { let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret().as_str()) .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?; - Ok(EigenClient { - client: Arc::new( - RawEigenClient::new( - config.rpc_node_url, - config.inclusion_polling_interval_ms, - private_key, - ) - .await?, - ), + let client = RawEigenClient::new(private_key, config, get_blob_data).await?; + Ok(Self { + client: Arc::new(client), }) } } #[async_trait] -impl DataAvailabilityClient for EigenClient { +impl DataAvailabilityClient for EigenClient { async fn dispatch_blob( &self, _: u32, // batch number @@ -46,13 +50,24 @@ impl DataAvailabilityClient for EigenClient { .client .dispatch_blob(data) .await - .map_err(to_non_retriable_da_error)?; + .map_err(to_retriable_da_error)?; Ok(DispatchResponse::from(blob_id)) } - async fn get_inclusion_data(&self, _: &str) -> Result, DAError> { - Ok(Some(InclusionData { data: vec![] })) + async fn get_inclusion_data(&self, blob_id: &str) -> Result, DAError> { + let inclusion_data = self + .client + .get_inclusion_data(blob_id) + .await + .map_err(to_retriable_da_error)?; + if let Some(inclusion_data) = inclusion_data { + Ok(Some(InclusionData { + data: inclusion_data, + })) + } else { + Ok(None) + } } fn clone_boxed(&self) -> Box { @@ -60,6 +75,6 @@ impl DataAvailabilityClient for EigenClient { } fn blob_size_limit(&self) -> Option { - Some(1920 * 1024) // 2mb - 128kb as a buffer + Some(RawEigenClient::::blob_size_limit()) } } diff --git a/core/node/da_clients/src/eigen/client_tests.rs b/core/node/da_clients/src/eigen/client_tests.rs new file mode 100644 index 000000000000..e504e4892b61 --- /dev/null +++ b/core/node/da_clients/src/eigen/client_tests.rs @@ -0,0 +1,266 @@ +/// EigenDA Client tests are ignored by default, because they require a remote dependency, +/// which may not always be available, causing tests to be flaky. 
+/// To run these tests, use the following command: +/// `cargo test -p zksync_da_clients -- --ignored` +#[cfg(test)] +mod tests { + use std::{str::FromStr, time::Duration}; + + use backon::{ConstantBuilder, Retryable}; + use serial_test::serial; + use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; + use zksync_da_client::{ + types::{DAError, DispatchResponse}, + DataAvailabilityClient, + }; + use zksync_types::secrets::PrivateKey; + + use crate::eigen::{blob_info::BlobInfo, EigenClient, GetBlobData}; + + impl EigenClient { + pub async fn get_blob_data( + &self, + blob_id: BlobInfo, + ) -> anyhow::Result>, DAError> { + self.client.get_blob_data(blob_id).await + } + + pub async fn get_commitment(&self, blob_id: &str) -> anyhow::Result> { + self.client.get_commitment(blob_id).await + } + } + const STATUS_QUERY_TIMEOUT: u64 = 1800000; // 30 minutes + const STATUS_QUERY_INTERVAL: u64 = 5; // 5 ms + + async fn get_blob_info( + client: &EigenClient, + result: &DispatchResponse, + ) -> anyhow::Result { + let blob_info = (|| async { + let blob_info = client.get_commitment(&result.blob_id).await?; + if blob_info.is_none() { + return Err(anyhow::anyhow!("Blob not found")); + } + Ok(blob_info.unwrap()) + }) + .retry( + &ConstantBuilder::default() + .with_delay(Duration::from_millis(STATUS_QUERY_INTERVAL)) + .with_max_times((STATUS_QUERY_TIMEOUT / STATUS_QUERY_INTERVAL) as usize), + ) + .when(|e| e.to_string().contains("Blob not found")) + .await?; + + Ok(blob_info) + } + + #[derive(Debug, Clone)] + struct MockGetBlobData; + + #[async_trait::async_trait] + impl GetBlobData for MockGetBlobData { + async fn call(&self, _input: &'_ str) -> anyhow::Result>> { + Ok(None) + } + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[serial] + async fn test_non_auth_dispersal() { + let config = EigenConfig { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + settlement_layer_confirmation_depth: -1, + eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + wait_for_finalization: false, + authenticated: false, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + chain_id: 17000, + }; + let secrets = EigenSecrets { + private_key: PrivateKey::from_str( + "d08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6", + ) + .unwrap(), + }; + let client = EigenClient::new(config.clone(), secrets, Box::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + + let blob_info = get_blob_info(&client, &result).await.unwrap(); + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data.unwrap(), data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[serial] + async fn test_auth_dispersal() { + let config = EigenConfig { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + 
settlement_layer_confirmation_depth: -1, + eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + wait_for_finalization: false, + authenticated: true, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + chain_id: 17000, + }; + let secrets = EigenSecrets { + private_key: PrivateKey::from_str( + "d08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6", + ) + .unwrap(), + }; + let client = EigenClient::new(config.clone(), secrets, Box::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data.unwrap(), data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[serial] + async fn test_wait_for_finalization() { + let config = EigenConfig { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + wait_for_finalization: true, + authenticated: true, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + chain_id: 17000, + }; + let secrets = EigenSecrets { + private_key: PrivateKey::from_str( + "d08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6", + ) + .unwrap(), + }; + let client = EigenClient::new(config.clone(), secrets, Box::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data.unwrap(), data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[serial] + async fn test_settlement_layer_confirmation_depth() { + let config = EigenConfig { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + settlement_layer_confirmation_depth: 5, + eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + wait_for_finalization: false, + authenticated: false, + g1_url: 
"https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + chain_id: 17000, + }; + let secrets = EigenSecrets { + private_key: PrivateKey::from_str( + "d08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6", + ) + .unwrap(), + }; + let client = EigenClient::new(config.clone(), secrets, Box::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data.unwrap(), data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[serial] + async fn test_auth_dispersal_settlement_layer_confirmation_depth() { + let config = EigenConfig { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + settlement_layer_confirmation_depth: 5, + eigenda_eth_rpc: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + wait_for_finalization: false, + authenticated: true, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + chain_id: 17000, + }; + let secrets = EigenSecrets { + private_key: PrivateKey::from_str( + "d08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6", + ) + .unwrap(), + }; + let client = EigenClient::new(config.clone(), secrets, Box::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data.unwrap(), data); + } +} diff --git a/core/node/da_clients/src/eigen/mod.rs b/core/node/da_clients/src/eigen/mod.rs index 699eae894246..c536d204175d 100644 --- a/core/node/da_clients/src/eigen/mod.rs +++ b/core/node/da_clients/src/eigen/mod.rs @@ -1,8 +1,11 @@ +mod blob_info; mod client; +mod client_tests; mod sdk; +mod verifier; +mod verifier_tests; -pub use self::client::EigenClient; - +pub use self::client::{EigenClient, GetBlobData}; #[allow(clippy::all)] pub(crate) mod disperser { include!("generated/disperser.rs"); diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs index 7ab7ea3ce33b..b00a9a959887 100644 --- a/core/node/da_clients/src/eigen/sdk.rs +++ b/core/node/da_clients/src/eigen/sdk.rs @@ -1,76 +1,139 @@ -use std::{str::FromStr, time::Duration}; +use std::{str::FromStr, sync::Arc}; 
use secp256k1::{ecdsa::RecoverableSignature, SecretKey}; -use tokio::sync::mpsc; -use tokio_stream::{wrappers::ReceiverStream, StreamExt}; +use tokio::sync::{mpsc, Mutex}; +use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt}; use tonic::{ transport::{Channel, ClientTlsConfig, Endpoint}, Streaming, }; - +use zksync_config::EigenConfig; +use zksync_da_client::types::DAError; +use zksync_eth_client::clients::PKSigningClient; +use zksync_types::{url::SensitiveUrl, K256PrivateKey, SLChainId, H160}; +use zksync_web3_decl::client::{Client, DynClient, L1}; + +use super::{ + blob_info::BlobInfo, + disperser::BlobInfo as DisperserBlobInfo, + verifier::{Verifier, VerifierConfig}, + GetBlobData, +}; use crate::eigen::{ - disperser, + blob_info, disperser::{ + self, authenticated_request::Payload::{AuthenticationData, DisperseRequest}, disperser_client::DisperserClient, - AuthenticatedReply, BlobAuthHeader, BlobVerificationProof, DisperseBlobReply, + AuthenticatedReply, BlobAuthHeader, }, }; #[derive(Debug, Clone)] -pub struct RawEigenClient { - client: DisperserClient, - polling_interval: Duration, +pub(crate) struct RawEigenClient { + client: Arc>>, private_key: SecretKey, - account_id: String, + pub config: EigenConfig, + verifier: Verifier, + get_blob_data: Box, } pub(crate) const DATA_CHUNK_SIZE: usize = 32; -impl RawEigenClient { - pub(crate) const BUFFER_SIZE: usize = 1000; +impl RawEigenClient { + const BLOB_SIZE_LIMIT: usize = 1024 * 1024 * 2; // 2 MB pub async fn new( - rpc_node_url: String, - inclusion_polling_interval_ms: u64, private_key: SecretKey, + config: EigenConfig, + get_blob_data: Box, ) -> anyhow::Result { let endpoint = - Endpoint::from_str(rpc_node_url.as_str())?.tls_config(ClientTlsConfig::new())?; - let client = DisperserClient::connect(endpoint) - .await - .map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?; - let polling_interval = Duration::from_millis(inclusion_polling_interval_ms); + Endpoint::from_str(config.disperser_rpc.as_str())?.tls_config(ClientTlsConfig::new())?; + let client = Arc::new(Mutex::new(DisperserClient::connect(endpoint).await?)); + + let verifier_config = VerifierConfig { + rpc_url: config.eigenda_eth_rpc.clone(), + svc_manager_addr: config.eigenda_svc_manager_address.clone(), + max_blob_size: Self::BLOB_SIZE_LIMIT as u32, + g1_url: config.g1_url.clone(), + g2_url: config.g2_url.clone(), + settlement_layer_confirmation_depth: config.settlement_layer_confirmation_depth.max(0) + as u32, + private_key: hex::encode(private_key.secret_bytes()), + chain_id: config.chain_id, + }; - let account_id = get_account_id(&private_key); + let url = SensitiveUrl::from_str(&verifier_config.rpc_url)?; + let query_client: Client = Client::http(url)?.build(); + let query_client = Box::new(query_client) as Box>; + let signing_client = PKSigningClient::new_raw( + K256PrivateKey::from_bytes(zksync_types::H256::from_str( + &verifier_config.private_key, + )?)?, + H160::from_str(&verifier_config.svc_manager_addr)?, + Verifier::DEFAULT_PRIORITY_FEE_PER_GAS, + SLChainId(verifier_config.chain_id), + query_client, + ); + let verifier = Verifier::new(verifier_config, signing_client) + .await + .map_err(|e| anyhow::anyhow!(format!("Failed to create verifier {:?}", e)))?; Ok(RawEigenClient { client, - polling_interval, private_key, - account_id, + config, + verifier, + get_blob_data, }) } - pub async fn dispatch_blob(&self, data: Vec) -> anyhow::Result { - let mut client_clone = self.client.clone(); - let (tx, rx) = 
mpsc::channel(Self::BUFFER_SIZE); + pub fn blob_size_limit() -> usize { + Self::BLOB_SIZE_LIMIT + } - let response_stream = client_clone.disperse_blob_authenticated(ReceiverStream::new(rx)); + async fn dispatch_blob_non_authenticated(&self, data: Vec<u8>) -> anyhow::Result<String> { let padded_data = convert_by_padding_empty_byte(&data); + let request = disperser::DisperseBlobRequest { + data: padded_data, + custom_quorum_numbers: vec![], + account_id: String::default(), // Account ID is not used in non-authenticated mode + }; + + let disperse_reply = self + .client + .lock() + .await + .disperse_blob(request) + .await? + .into_inner(); + + Ok(hex::encode(disperse_reply.request_id)) + } + + async fn dispatch_blob_authenticated(&self, data: Vec<u8>) -> anyhow::Result<String> { + let (tx, rx) = mpsc::unbounded_channel(); // 1. send DisperseBlobRequest - self.disperse_data(padded_data, &tx).await?; + let padded_data = convert_by_padding_empty_byte(&data); + self.disperse_data(padded_data, &tx)?; // this await is blocked until the first response on the stream, so we only await after sending the `DisperseBlobRequest` - let mut response_stream = response_stream.await?.into_inner(); + let mut response_stream = self + .client + .clone() + .lock() + .await + .disperse_blob_authenticated(UnboundedReceiverStream::new(rx)) + .await?; + let response_stream = response_stream.get_mut(); // 2. receive BlobAuthHeader - let blob_auth_header = self.receive_blob_auth_header(&mut response_stream).await?; + let blob_auth_header = self.receive_blob_auth_header(response_stream).await?; // 3. sign and send BlobAuthHeader - self.submit_authentication_data(blob_auth_header.clone(), &tx) - .await?; + self.submit_authentication_data(blob_auth_header.clone(), &tx)?; // 4. receive DisperseBlobReply let reply = response_stream @@ -84,42 +147,84 @@ impl RawEigenClient { let disperser::authenticated_reply::Payload::DisperseReply(disperse_reply) = reply else { return Err(anyhow::anyhow!("Unexpected response from server")); }; + Ok(hex::encode(disperse_reply.request_id)) + } + + pub async fn get_commitment(&self, blob_id: &str) -> anyhow::Result<Option<BlobInfo>> { + let blob_info = self.try_get_inclusion_data(blob_id.to_string()).await?; + + let Some(blob_info) = blob_info else { + return Ok(None); + }; + let blob_info = blob_info::BlobInfo::try_from(blob_info) + .map_err(|e| anyhow::anyhow!("Failed to convert blob info: {}", e))?; + + let Some(data) = self.get_blob_data(blob_info.clone()).await? else { + return Err(anyhow::anyhow!("Failed to get blob data")); + }; + let data_db = self.get_blob_data.call(blob_id).await?; + if let Some(data_db) = data_db { + if data_db != data { + return Err(anyhow::anyhow!( + "Data from db and from disperser are different" + )); + } + } + self.verifier + .verify_commitment(blob_info.blob_header.commitment.clone(), data) + .map_err(|_| anyhow::anyhow!("Failed to verify commitment"))?; + + let result = self + .verifier + .verify_inclusion_data_against_settlement_layer(blob_info.clone()) + .await; + // in case of an error the dispatcher will retry, so we just return None here + if result.is_err() { + return Ok(None); + } - // 5. 
poll for blob status until it reaches the Confirmed state - let verification_proof = self - .await_for_inclusion(client_clone, disperse_reply) - .await?; - let blob_id = format!( - "{}:{}", - verification_proof.batch_id, verification_proof.blob_index - ); tracing::info!("Blob dispatch confirmed, blob id: {}", blob_id); + Ok(Some(blob_info)) + } - Ok(blob_id) + pub async fn get_inclusion_data(&self, blob_id: &str) -> anyhow::Result<Option<Vec<u8>>> { + let blob_info = self.get_commitment(blob_id).await?; + if let Some(blob_info) = blob_info { + Ok(Some(blob_info.blob_verification_proof.inclusion_proof)) + } else { + Ok(None) + } } - async fn disperse_data( + pub async fn dispatch_blob(&self, data: Vec<u8>) -> anyhow::Result<String> { + if self.config.authenticated { + self.dispatch_blob_authenticated(data).await + } else { + self.dispatch_blob_non_authenticated(data).await + } + } + + fn disperse_data( &self, data: Vec<u8>, - tx: &mpsc::Sender<disperser::AuthenticatedRequest>, + tx: &mpsc::UnboundedSender<disperser::AuthenticatedRequest>, ) -> anyhow::Result<()> { let req = disperser::AuthenticatedRequest { payload: Some(DisperseRequest(disperser::DisperseBlobRequest { data, custom_quorum_numbers: vec![], - account_id: self.account_id.clone(), + account_id: get_account_id(&self.private_key), })), }; tx.send(req) - .await .map_err(|e| anyhow::anyhow!("Failed to send DisperseBlobRequest: {}", e)) } - async fn submit_authentication_data( + fn submit_authentication_data( &self, blob_auth_header: BlobAuthHeader, - tx: &mpsc::Sender<disperser::AuthenticatedRequest>, + tx: &mpsc::UnboundedSender<disperser::AuthenticatedRequest>, ) -> anyhow::Result<()> { // TODO: replace challenge_parameter with actual auth header when it is available let digest = zksync_basic_types::web3::keccak256( @@ -143,7 +248,6 @@ impl RawEigenClient { }; tx.send(req) - .await .map_err(|e| anyhow::anyhow!("Failed to send AuthenticationData: {}", e)) } @@ -171,43 +275,84 @@ impl RawEigenClient { } } - async fn await_for_inclusion( + async fn try_get_inclusion_data( &self, - mut client: DisperserClient<Channel>, - disperse_blob_reply: DisperseBlobReply, - ) -> anyhow::Result<BlobVerificationProof> { + request_id: String, + ) -> anyhow::Result<Option<DisperserBlobInfo>> { let polling_request = disperser::BlobStatusRequest { - request_id: disperse_blob_reply.request_id, + request_id: hex::decode(request_id)?, }; - loop { - tokio::time::sleep(self.polling_interval).await; - let resp = client - .get_blob_status(polling_request.clone()) - .await? - .into_inner(); - - match disperser::BlobStatus::try_from(resp.status)? { - disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => {} - disperser::BlobStatus::Failed => { - return Err(anyhow::anyhow!("Blob dispatch failed")) - } - disperser::BlobStatus::InsufficientSignatures => { - return Err(anyhow::anyhow!("Insufficient signatures")) - } - disperser::BlobStatus::Confirmed | disperser::BlobStatus::Finalized => { - let verification_proof = resp + let resp = self + .client + .lock() + .await + .get_blob_status(polling_request.clone()) + .await? + .into_inner(); + + match disperser::BlobStatus::try_from(resp.status)? { + disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => Ok(None), + disperser::BlobStatus::Failed => Err(anyhow::anyhow!("Blob dispatch failed")), + disperser::BlobStatus::InsufficientSignatures => { + Err(anyhow::anyhow!("Insufficient signatures")) + } + disperser::BlobStatus::Confirmed => { + if !self.config.wait_for_finalization { + let blob_info = resp .info - .ok_or_else(|| anyhow::anyhow!("No blob header in response"))? 
- .blob_verification_proof - .ok_or_else(|| anyhow::anyhow!("No blob verification proof in response"))?; - - return Ok(verification_proof); + .ok_or_else(|| anyhow::anyhow!("No blob header in response"))?; + return Ok(Some(blob_info)); } - - _ => return Err(anyhow::anyhow!("Received unknown blob status")), + Ok(None) + } + disperser::BlobStatus::Finalized => { + let blob_info = resp + .info + .ok_or_else(|| anyhow::anyhow!("No blob header in response"))?; + Ok(Some(blob_info)) } + + _ => Err(anyhow::anyhow!("Received unknown blob status")), + } + } + + pub async fn get_blob_data( + &self, + blob_info: BlobInfo, + ) -> anyhow::Result>, DAError> { + use anyhow::anyhow; + use zksync_da_client::types::DAError; + + let blob_index = blob_info.blob_verification_proof.blob_index; + let batch_header_hash = blob_info + .blob_verification_proof + .batch_medatada + .batch_header_hash; + let get_response = self + .client + .lock() + .await + .retrieve_blob(disperser::RetrieveBlobRequest { + batch_header_hash, + blob_index, + }) + .await + .map_err(|e| DAError { + error: anyhow!(e), + is_retriable: true, + })? + .into_inner(); + + if get_response.data.is_empty() { + return Err(DAError { + error: anyhow!("Failed to get blob data"), + is_retriable: false, + }); } + + let data = remove_empty_byte_from_padded_bytes(&get_response.data); + Ok(Some(data)) } } @@ -244,3 +389,55 @@ fn convert_by_padding_empty_byte(data: &[u8]) -> Vec { valid_data.truncate(valid_end); valid_data } + +fn remove_empty_byte_from_padded_bytes(data: &[u8]) -> Vec { + let parse_size = DATA_CHUNK_SIZE; + + // Calculate the number of chunks + let data_len = (data.len() + parse_size - 1) / parse_size; + + // Pre-allocate `valid_data` with enough space for all chunks + let mut valid_data = vec![0u8; data_len * (DATA_CHUNK_SIZE - 1)]; + let mut valid_end = data_len * (DATA_CHUNK_SIZE - 1); + + for (i, chunk) in data.chunks(parse_size).enumerate() { + let offset = i * (DATA_CHUNK_SIZE - 1); + + let copy_end = offset + chunk.len() - 1; + valid_data[offset..copy_end].copy_from_slice(&chunk[1..]); + + if i == data_len - 1 && chunk.len() < parse_size { + valid_end = offset + chunk.len() - 1; + } + } + + valid_data.truncate(valid_end); + valid_data +} + +#[cfg(test)] +mod test { + #[test] + fn test_pad_and_unpad() { + let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9]; + let padded_data = super::convert_by_padding_empty_byte(&data); + let unpadded_data = super::remove_empty_byte_from_padded_bytes(&padded_data); + assert_eq!(data, unpadded_data); + } + + #[test] + fn test_pad_and_unpad_large() { + let data = vec![1; 1000]; + let padded_data = super::convert_by_padding_empty_byte(&data); + let unpadded_data = super::remove_empty_byte_from_padded_bytes(&padded_data); + assert_eq!(data, unpadded_data); + } + + #[test] + fn test_pad_and_unpad_empty() { + let data = Vec::new(); + let padded_data = super::convert_by_padding_empty_byte(&data); + let unpadded_data = super::remove_empty_byte_from_padded_bytes(&padded_data); + assert_eq!(data, unpadded_data); + } +} diff --git a/core/node/da_clients/src/eigen/verifier.rs b/core/node/da_clients/src/eigen/verifier.rs new file mode 100644 index 000000000000..6acd5398d220 --- /dev/null +++ b/core/node/da_clients/src/eigen/verifier.rs @@ -0,0 +1,550 @@ +use std::{collections::HashMap, fs::File, io::copy, path::Path, str::FromStr}; + +use ark_bn254::{Fq, G1Affine}; +use ethabi::{encode, ParamType, Token}; +use rust_kzg_bn254::{blob::Blob, kzg::Kzg, polynomial::PolynomialFormat}; +use tiny_keccak::{Hasher, Keccak}; +use 
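Editor's aside — an illustrative sketch, not part of the changeset: the pair of functions above frames the payload so every 32-byte word stays a canonical BN254 field element, prepending a 0x00 byte to each 31-byte chunk on the way in and stripping the first byte of each 32-byte chunk on the way out. A standalone re-implementation of that round trip, assuming DATA_CHUNK_SIZE = 32 as the chunk arithmetic above implies:

// Standalone illustration of the 31-to-32-byte padding round trip.
const DATA_CHUNK_SIZE: usize = 32;

fn pad(data: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(data.len() + data.len() / (DATA_CHUNK_SIZE - 1) + 1);
    for chunk in data.chunks(DATA_CHUNK_SIZE - 1) {
        out.push(0x00); // keeps every 32-byte word below the BN254 field modulus
        out.extend_from_slice(chunk);
    }
    out
}

fn unpad(padded: &[u8]) -> Vec<u8> {
    // Drop the leading zero byte of every (possibly partial) 32-byte chunk.
    padded
        .chunks(DATA_CHUNK_SIZE)
        .flat_map(|chunk| chunk[1..].iter().copied())
        .collect()
}

fn main() {
    let data = b"hello eigenda".to_vec();
    let padded = pad(&data); // 13 bytes become 14: one zero byte per chunk
    assert_eq!(unpad(&padded), data);
}

diff --git a/core/node/da_clients/src/eigen/verifier.rs b/core/node/da_clients/src/eigen/verifier.rs new file mode 100644 index 000000000000..6acd5398d220 --- /dev/null +++ b/core/node/da_clients/src/eigen/verifier.rs @@ -0,0 +1,550 @@ +use std::{collections::HashMap, fs::File, io::copy, path::Path, str::FromStr}; + +use ark_bn254::{Fq, G1Affine}; +use ethabi::{encode, ParamType, Token}; +use rust_kzg_bn254::{blob::Blob, kzg::Kzg, polynomial::PolynomialFormat}; +use tiny_keccak::{Hasher, Keccak}; +use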
url::Url; +use zksync_basic_types::web3::CallRequest; +use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult}; +use zksync_types::{ + web3::{self, BlockId, BlockNumber}, + H160, U256, U64, +}; + +use super::blob_info::{BatchHeader, BlobHeader, BlobInfo, G1Commitment}; + +#[async_trait::async_trait] +pub trait VerifierClient: Sync + Send + std::fmt::Debug { + fn clone_boxed(&self) -> Box<dyn VerifierClient>; + + /// Returns the current block number. + async fn block_number(&self) -> EnrichedClientResult<U64>; + + /// Invokes a function on a contract specified by `contract_address` / `contract_abi` using `eth_call`. + async fn call_contract_function( + &self, + request: web3::CallRequest, + block: Option<BlockId>, + ) -> EnrichedClientResult<web3::Bytes>; +} + +#[async_trait::async_trait] +impl VerifierClient for PKSigningClient { + fn clone_boxed(&self) -> Box<dyn VerifierClient> { + Box::new(self.clone()) + } + + async fn block_number(&self) -> EnrichedClientResult<U64> { + self.as_ref().block_number().await + } + + async fn call_contract_function( + &self, + request: web3::CallRequest, + block: Option<BlockId>, + ) -> EnrichedClientResult<web3::Bytes> { + self.as_ref().call_contract_function(request, block).await + } +} + +#[derive(Debug)] +pub enum VerificationError { + ServiceManagerError, + KzgError, + WrongProof, + DifferentCommitments, + DifferentRoots, + EmptyHash, + DifferentHashes, + WrongQuorumParams, + QuorumNotConfirmed, + CommitmentNotOnCurve, + CommitmentNotOnCorrectSubgroup, + LinkError, +} + +/// Configuration for the verifier used for authenticated dispersals +#[derive(Debug, Clone)] +pub struct VerifierConfig { + pub rpc_url: String, + pub svc_manager_addr: String, + pub max_blob_size: u32, + pub g1_url: String, + pub g2_url: String, + pub settlement_layer_confirmation_depth: u32, + pub private_key: String, + pub chain_id: u64, +} + +/// Verifier used to verify the integrity of the blob info +/// Kzg is used for commitment verification +/// EigenDA service manager is used to connect to the service manager contract +#[derive(Debug)] +pub struct Verifier { + kzg: Kzg, + cfg: VerifierConfig, + signing_client: Box<dyn VerifierClient>, +} + +impl Clone for Verifier { + fn clone(&self) -> Self { + Self { + kzg: self.kzg.clone(), + cfg: self.cfg.clone(), + signing_client: self.signing_client.clone_boxed(), + } + } +} + +impl Verifier { + pub const DEFAULT_PRIORITY_FEE_PER_GAS: u64 = 100; + pub const SRSORDER: u32 = 268435456; // 2 ^ 28 + + async fn save_point(url: String, point: String) -> Result<(), VerificationError> { + let url = Url::parse(&url).map_err(|_| VerificationError::LinkError)?; + let response = reqwest::get(url) + .await + .map_err(|_| VerificationError::LinkError)?; + if !response.status().is_success() { + return Err(VerificationError::LinkError); + } + let path = format!("./{}", point); + let path = Path::new(&path); + let mut file = File::create(path).map_err(|_| VerificationError::LinkError)?; + let content = response + .bytes() + .await + .map_err(|_| VerificationError::LinkError)?; + copy(&mut content.as_ref(), &mut file).map_err(|_| VerificationError::LinkError)?; + Ok(()) + } + async fn save_points(url_g1: String, url_g2: String) -> Result<String, VerificationError> { + Self::save_point(url_g1.clone(), "g1.point".to_string()).await?; + Self::save_point(url_g2.clone(), "g2.point.powerOf2".to_string()).await?; + + Ok(".".to_string()) + } + pub async fn new<T: VerifierClient + 'static>( + cfg: VerifierConfig, + signing_client: T, + ) -> Result<Self, VerificationError> { + let srs_points_to_load = cfg.max_blob_size / 32; + let path = Self::save_points(cfg.clone().g1_url, cfg.clone().g2_url).await?; + let kzg = Kzg::setup(
&format!("{}{}", path, "/g1.point"), + "", + &format!("{}{}", path, "/g2.point.powerOf2"), + Self::SRSORDER, + srs_points_to_load, + "".to_string(), + ); + let kzg = kzg.map_err(|e| { + tracing::error!("Failed to setup KZG: {:?}", e); + VerificationError::KzgError + })?; + + Ok(Self { + kzg, + cfg, + signing_client: Box::new(signing_client), + }) + } + + /// Return the commitment from a blob + fn commit(&self, blob: Vec<u8>) -> Result<G1Affine, VerificationError> { + let blob = Blob::from_bytes_and_pad(&blob.to_vec()); + self.kzg + .blob_to_kzg_commitment(&blob, PolynomialFormat::InEvaluationForm) + .map_err(|_| VerificationError::KzgError) + } + + /// Compare the given commitment with the commitment generated with the blob + pub fn verify_commitment( + &self, + expected_commitment: G1Commitment, + blob: Vec<u8>, + ) -> Result<(), VerificationError> { + let actual_commitment = self.commit(blob)?; + let expected_commitment = G1Affine::new_unchecked( + Fq::from(num_bigint::BigUint::from_bytes_be(&expected_commitment.x)), + Fq::from(num_bigint::BigUint::from_bytes_be(&expected_commitment.y)), + ); + if !expected_commitment.is_on_curve() { + return Err(VerificationError::CommitmentNotOnCurve); + } + if !expected_commitment.is_in_correct_subgroup_assuming_on_curve() { + return Err(VerificationError::CommitmentNotOnCorrectSubgroup); + } + if actual_commitment != expected_commitment { + return Err(VerificationError::DifferentCommitments); + } + Ok(()) + } + + pub fn hash_encode_blob_header(&self, blob_header: BlobHeader) -> Vec<u8> { + let mut blob_quorums = vec![]; + for quorum in blob_header.blob_quorum_params { + let quorum = Token::Tuple(vec![ + Token::Uint(ethabi::Uint::from(quorum.quorum_number)), + Token::Uint(ethabi::Uint::from(quorum.adversary_threshold_percentage)), + Token::Uint(ethabi::Uint::from(quorum.confirmation_threshold_percentage)), + Token::Uint(ethabi::Uint::from(quorum.chunk_length)), + ]); + blob_quorums.push(quorum); + } + let blob_header = Token::Tuple(vec![ + Token::Tuple(vec![ + Token::Uint(ethabi::Uint::from_big_endian(&blob_header.commitment.x)), + Token::Uint(ethabi::Uint::from_big_endian(&blob_header.commitment.y)), + ]), + Token::Uint(ethabi::Uint::from(blob_header.data_length)), + Token::Array(blob_quorums), + ]); + + let encoded = encode(&[blob_header]); + + let mut keccak = Keccak::v256(); + keccak.update(&encoded); + let mut hash = [0u8; 32]; + keccak.finalize(&mut hash); + hash.to_vec() + } + + pub fn process_inclusion_proof( + &self, + proof: &[u8], + leaf: &[u8], + index: u32, + ) -> Result<Vec<u8>, VerificationError> { + let mut index = index; + if proof.is_empty() || proof.len() % 32 != 0 { + return Err(VerificationError::WrongProof); + } + let mut computed_hash = leaf.to_vec(); + for i in 0..proof.len() / 32 { + let mut combined = proof[i * 32..(i + 1) * 32] + .iter() + .chain(computed_hash.iter()) + .cloned() + .collect::<Vec<u8>>(); + if index % 2 == 0 { + combined = computed_hash + .iter() + .chain(proof[i * 32..(i + 1) * 32].iter()) + .cloned() + .collect::<Vec<u8>>(); + }; + let mut keccak = Keccak::v256(); + keccak.update(&combined); + let mut hash = [0u8; 32]; + keccak.finalize(&mut hash); + computed_hash = hash.to_vec(); + index /= 2; + } + + Ok(computed_hash) + }
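Editor's aside — an illustrative sketch, not part of the changeset: the loop above is the standard binary-Merkle walk — at each level the 32-byte sibling comes from the proof, the low bit of `index` decides whether the running hash is the left or right child, and `index` is halved. A minimal standalone demonstration on a two-leaf tree, using tiny_keccak as above (the leaf values are invented for the example):

use tiny_keccak::{Hasher, Keccak};

fn keccak256(data: &[u8]) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    hasher.update(data);
    let mut out = [0u8; 32];
    hasher.finalize(&mut out);
    out
}

// Same folding as process_inclusion_proof, specialized to fixed-size digests.
fn fold_proof(leaf: [u8; 32], proof: &[[u8; 32]], mut index: u32) -> [u8; 32] {
    let mut hash = leaf;
    for sibling in proof {
        hash = if index % 2 == 0 {
            keccak256(&[hash.as_slice(), sibling.as_slice()].concat()) // we are the left child
        } else {
            keccak256(&[sibling.as_slice(), hash.as_slice()].concat()) // we are the right child
        };
        index /= 2;
    }
    hash
}

fn main() {
    let leaf0 = keccak256(b"blob 0");
    let leaf1 = keccak256(b"blob 1");
    let root = keccak256(&[leaf0.as_slice(), leaf1.as_slice()].concat());
    // Proving leaf0 (index 0): the proof is just its sibling, leaf1.
    assert_eq!(fold_proof(leaf0, &[leaf1], 0), root);
}

+ + /// Verifies the certificate's batch root + pub fn verify_merkle_proof(&self, cert: BlobInfo) -> Result<(), VerificationError> { + let inclusion_proof = cert.blob_verification_proof.inclusion_proof; + let root = cert + .blob_verification_proof + .batch_medatada + .batch_header + .batch_root; + let blob_index =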
cert.blob_verification_proof.blob_index; + let blob_header = cert.blob_header; + + let blob_header_hash = self.hash_encode_blob_header(blob_header); + let mut keccak = Keccak::v256(); + keccak.update(&blob_header_hash); + let mut leaf_hash = [0u8; 32]; + keccak.finalize(&mut leaf_hash); + + let generated_root = + self.process_inclusion_proof(&inclusion_proof, &leaf_hash, blob_index)?; + + if generated_root != root { + return Err(VerificationError::DifferentRoots); + } + Ok(()) + } + + fn hash_batch_metadata( + &self, + batch_header: BatchHeader, + signatory_record_hash: Vec<u8>, + confirmation_block_number: u32, + ) -> Vec<u8> { + let batch_header_token = Token::Tuple(vec![ + Token::FixedBytes(batch_header.batch_root), + Token::Bytes(batch_header.quorum_numbers), + Token::Bytes(batch_header.quorum_signed_percentages), + Token::Uint(ethabi::Uint::from(batch_header.reference_block_number)), + ]); + + let encoded = encode(&[batch_header_token]); + + let mut keccak = Keccak::v256(); + keccak.update(&encoded); + let mut header_hash = [0u8; 32]; + keccak.finalize(&mut header_hash); + + let hash_token = Token::Tuple(vec![ + Token::FixedBytes(header_hash.to_vec()), + Token::FixedBytes(signatory_record_hash), + ]); + + let mut hash_encoded = encode(&[hash_token]); + + hash_encoded.append(&mut confirmation_block_number.to_be_bytes().to_vec()); + + let mut keccak = Keccak::v256(); + keccak.update(&hash_encoded); + let mut hash = [0u8; 32]; + keccak.finalize(&mut hash); + + hash.to_vec() + } + + /// Retrieves the block to make the request to the service manager + async fn get_context_block(&self) -> Result<u64, VerificationError> { + let latest = self + .signing_client + .as_ref() + .block_number() + .await + .map_err(|_| VerificationError::ServiceManagerError)? + .as_u64(); + + if self.cfg.settlement_layer_confirmation_depth == 0 { + return Ok(latest); + } + Ok(latest - (self.cfg.settlement_layer_confirmation_depth as u64 - 1)) + } + + async fn call_batch_id_to_metadata_hash( + &self, + blob_info: BlobInfo, + ) -> Result<Vec<u8>, VerificationError> { + let context_block = self.get_context_block().await?; + + let func_selector = + ethabi::short_signature("batchIdToBatchMetadataHash", &[ParamType::Uint(32)]); + let mut data = func_selector.to_vec(); + let mut batch_id_vec = [0u8; 32]; + U256::from(blob_info.blob_verification_proof.batch_id).to_big_endian(&mut batch_id_vec); + data.append(batch_id_vec.to_vec().as_mut()); + + let call_request = CallRequest { + to: Some( + H160::from_str(&self.cfg.svc_manager_addr) + .map_err(|_| VerificationError::ServiceManagerError)?, + ), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let res = self + .signing_client + .as_ref() + .call_contract_function( + call_request, + Some(BlockId::Number(BlockNumber::Number(context_block.into()))), + ) + .await + .map_err(|_| VerificationError::ServiceManagerError)?; + + Ok(res.0.to_vec()) + } + + /// Verifies the certificate batch hash + pub async fn verify_batch(&self, blob_info: BlobInfo) -> Result<(), VerificationError> { + let expected_hash = self + .call_batch_id_to_metadata_hash(blob_info.clone()) + .await?; + + if expected_hash == vec![0u8; 32] { + return Err(VerificationError::EmptyHash); + } + + let actual_hash = self.hash_batch_metadata( + blob_info + .blob_verification_proof + .batch_medatada + .batch_header, + blob_info + .blob_verification_proof + .batch_medatada + .signatory_record_hash, + blob_info + .blob_verification_proof + .batch_medatada + .confirmation_block_number, + ); + + if expected_hash != actual_hash {
+ return Err(VerificationError::DifferentHashes); + } + Ok(()) + } + + fn decode_bytes(&self, encoded: Vec<u8>) -> Result<Vec<u8>, String> { + // Ensure the input has at least 64 bytes (offset + length) + if encoded.len() < 64 { + return Err("Encoded data is too short".to_string()); + } + + // Read the offset (first 32 bytes) + let offset = { + let mut offset_bytes = [0u8; 32]; + offset_bytes.copy_from_slice(&encoded[0..32]); + usize::from_be_bytes( + offset_bytes[24..32] + .try_into() + .map_err(|_| "Offset is too large")?, + ) + }; + + // Check if offset is valid + if offset + 32 > encoded.len() { + return Err("Offset points outside the encoded data".to_string()); + } + + // Read the length (32 bytes at the offset position) + let length = { + let mut length_bytes = [0u8; 32]; + length_bytes.copy_from_slice(&encoded[offset..offset + 32]); + usize::from_be_bytes( + length_bytes[24..32] + .try_into() + .map_err(|_| "Length is too large")?, + ) + }; + + // Check if the length is valid + if offset + 32 + length > encoded.len() { + return Err("Length extends beyond the encoded data".to_string()); + } + + // Extract the bytes data + let data = encoded[offset + 32..offset + 32 + length].to_vec(); + Ok(data) + }
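Editor's aside — an illustrative sketch, not part of the changeset: decode_bytes hand-rolls the ABI decoding of a single `bytes` return value — a 32-byte offset word, then a 32-byte length word at that offset, then the payload. A worked example of that layout; the encoder below is invented for illustration and, like the single-byte reads, assumes offsets and lengths under 256 so they fit in the last byte of their word:

// Illustrative ABI layout for `bytes` = [0xaa, 0xbb, 0xcc]:
//   word 0 (bytes  0..32): offset = 0x20 (payload area starts after this word)
//   word 1 (bytes 32..64): length = 3
//   word 2 (bytes 64..96): 0xaa 0xbb 0xcc followed by 29 zero bytes of padding
fn encode_bytes(payload: &[u8]) -> Vec<u8> {
    let mut out = vec![0u8; 64];
    out[31] = 32; // offset word
    out[63] = payload.len() as u8; // length word (example assumes len < 256)
    out.extend_from_slice(payload);
    out.resize(64 + ((payload.len() + 31) / 32) * 32, 0); // right-pad to a full word
    out
}

fn main() {
    let encoded = encode_bytes(&[0xaa, 0xbb, 0xcc]);
    // Same arithmetic as decode_bytes: offset -> length -> payload slice.
    let offset = encoded[31] as usize;
    let length = encoded[offset + 31] as usize;
    assert_eq!(&encoded[offset + 32..offset + 32 + length], &[0xaa, 0xbb, 0xcc]);
}

+ + async fn get_quorum_adversary_threshold( + &self, + quorum_number: u32, + ) -> Result<u8, VerificationError> { + let func_selector = ethabi::short_signature("quorumAdversaryThresholdPercentages", &[]); + let data = func_selector.to_vec(); + + let call_request = CallRequest { + to: Some( + H160::from_str(&self.cfg.svc_manager_addr) + .map_err(|_| VerificationError::ServiceManagerError)?, + ), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let res = self + .signing_client + .as_ref() + .call_contract_function(call_request, None) + .await + .map_err(|_| VerificationError::ServiceManagerError)?; + + let percentages = self + .decode_bytes(res.0.to_vec()) + .map_err(|_| VerificationError::ServiceManagerError)?; + + if percentages.len() > quorum_number as usize { + return Ok(percentages[quorum_number as usize]); + } + Ok(0) + } + + async fn call_quorum_numbers_required(&self) -> Result<Vec<u8>, VerificationError> { + let func_selector = ethabi::short_signature("quorumNumbersRequired", &[]); + let data = func_selector.to_vec(); + let call_request = CallRequest { + to: Some( + H160::from_str(&self.cfg.svc_manager_addr) + .map_err(|_| VerificationError::ServiceManagerError)?, + ), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let res = self + .signing_client + .as_ref() + .call_contract_function(call_request, None) + .await + .map_err(|_| VerificationError::ServiceManagerError)?; + + self.decode_bytes(res.0.to_vec()) + .map_err(|_| VerificationError::ServiceManagerError) + } + + /// Verifies that the certificate's blob quorum params are correct + pub async fn verify_security_params(&self, cert: BlobInfo) -> Result<(), VerificationError> { + let blob_header = cert.blob_header; + let batch_header = cert.blob_verification_proof.batch_medatada.batch_header; + + let mut confirmed_quorums: HashMap<u32, bool> = HashMap::new(); + for i in 0..blob_header.blob_quorum_params.len() { + if batch_header.quorum_numbers[i] as u32 + != blob_header.blob_quorum_params[i].quorum_number + { + return Err(VerificationError::WrongQuorumParams); + } + if blob_header.blob_quorum_params[i].adversary_threshold_percentage + > blob_header.blob_quorum_params[i].confirmation_threshold_percentage + { + return Err(VerificationError::WrongQuorumParams); + } + let quorum_adversary_threshold = self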
.get_quorum_adversary_threshold(blob_header.blob_quorum_params[i].quorum_number) + .await?; + + if quorum_adversary_threshold > 0 + && blob_header.blob_quorum_params[i].adversary_threshold_percentage + < quorum_adversary_threshold as u32 + { + return Err(VerificationError::WrongQuorumParams); + } + + if (batch_header.quorum_signed_percentages[i] as u32) + < blob_header.blob_quorum_params[i].confirmation_threshold_percentage + { + return Err(VerificationError::WrongQuorumParams); + } + + confirmed_quorums.insert(blob_header.blob_quorum_params[i].quorum_number, true); + } + + let required_quorums = self.call_quorum_numbers_required().await?; + + for quorum in required_quorums { + if !confirmed_quorums.contains_key(&(quorum as u32)) { + return Err(VerificationError::QuorumNotConfirmed); + } + } + Ok(()) + } + + /// Verifies that the certificate is valid + pub async fn verify_inclusion_data_against_settlement_layer( + &self, + cert: BlobInfo, + ) -> Result<(), VerificationError> { + self.verify_batch(cert.clone()).await?; + self.verify_merkle_proof(cert.clone())?; + self.verify_security_params(cert.clone()).await?; + Ok(()) + } +} diff --git a/core/node/da_clients/src/eigen/verifier_tests.rs b/core/node/da_clients/src/eigen/verifier_tests.rs new file mode 100644 index 000000000000..bdea8f9a9960 --- /dev/null +++ b/core/node/da_clients/src/eigen/verifier_tests.rs @@ -0,0 +1,816 @@ +#[cfg(test)] +mod test { + use std::{collections::HashMap, str::FromStr}; + + use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult}; + use zksync_types::{ + url::SensitiveUrl, + web3::{BlockId, Bytes, CallRequest}, + K256PrivateKey, SLChainId, H160, U64, + }; + use zksync_web3_decl::client::{Client, DynClient, L1}; + + use crate::eigen::{ + blob_info::{ + BatchHeader, BatchMetadata, BlobHeader, BlobInfo, BlobQuorumParam, + BlobVerificationProof, G1Commitment, + }, + verifier::{VerificationError, Verifier, VerifierClient, VerifierConfig}, + }; + + fn get_verifier_config() -> VerifierConfig { + VerifierConfig { + rpc_url: "https://ethereum-holesky-rpc.publicnode.com".to_string(), + svc_manager_addr: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(), + max_blob_size: 2 * 1024 * 1024, + g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + settlement_layer_confirmation_depth: 0, + private_key: "0xd08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6" + .to_string(), + chain_id: 17000, + } + } + + /// Mock struct for the Verifier + /// Used to avoid making actual calls to a remote disperser + /// and possibly making the CI fail due to network issues.
+ /// To run tests with the actual verifier run: + /// `cargo test -p zksync_da_clients -- --ignored` + #[derive(Debug)] + pub struct MockVerifierClient { + replies: HashMap<String, Bytes>, + } + + impl MockVerifierClient { + pub fn new(replies: HashMap<String, Bytes>) -> Self { + Self { replies } + } + } + + #[async_trait::async_trait] + impl VerifierClient for MockVerifierClient { + fn clone_boxed(&self) -> Box<dyn VerifierClient> { + Box::new(Self { + replies: self.replies.clone(), + }) + } + + async fn block_number(&self) -> EnrichedClientResult<U64> { + Ok(U64::from(42)) + } + + async fn call_contract_function( + &self, + request: CallRequest, + _block: Option<BlockId>, + ) -> EnrichedClientResult<Bytes> { + let req = serde_json::to_string(&request).unwrap(); + Ok(self.replies.get(&req).unwrap().clone()) + } + } + + fn create_remote_signing_client(cfg: VerifierConfig) -> PKSigningClient { + let url = SensitiveUrl::from_str(&cfg.rpc_url).unwrap(); + let query_client: Client<L1> = Client::http(url).unwrap().build(); + let query_client = Box::new(query_client) as Box<DynClient<L1>>; + PKSigningClient::new_raw( + K256PrivateKey::from_bytes( + zksync_types::H256::from_str(&cfg.private_key) + .map_err(|_| VerificationError::ServiceManagerError) + .unwrap(), + ) + .map_err(|_| VerificationError::ServiceManagerError) + .unwrap(), + zksync_types::H160::from_str(&cfg.svc_manager_addr) + .map_err(|_| VerificationError::ServiceManagerError) + .unwrap(), + Verifier::DEFAULT_PRIORITY_FEE_PER_GAS, + SLChainId(cfg.chain_id), + query_client, + ) + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + async fn test_verify_commitment() { + let cfg = get_verifier_config(); + let signing_client = create_remote_signing_client(cfg.clone()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let commitment = G1Commitment { + x: vec![ + 22, 11, 176, 29, 82, 48, 62, 49, 51, 119, 94, 17, 156, 142, 248, 96, 240, 183, 134, + 85, 152, 5, 74, 27, 175, 83, 162, 148, 17, 110, 201, 74, + ], + y: vec![ + 12, 132, 236, 56, 147, 6, 176, 135, 244, 166, 21, 18, 87, 76, 122, 3, 23, 22, 254, + 236, 148, 129, 110, 207, 131, 116, 58, 170, 4, 130, 191, 157, + ], + }; + let blob = vec![1u8; 100]; // The actual blob sent was kzg-padded, but Blob::from_bytes_and_pad pads it internally, so we don't need to pad it here. + let result = verifier.verify_commitment(commitment, blob); + assert!(result.is_ok()); + } + + /// Test the verification of the commitment with a mocked verifier. + /// To test actual behaviour of the verifier, run the test above + #[tokio::test] + async fn test_verify_commitment_mocked() { + let cfg = get_verifier_config(); + let signing_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let commitment = G1Commitment { + x: vec![ + 22, 11, 176, 29, 82, 48, 62, 49, 51, 119, 94, 17, 156, 142, 248, 96, 240, 183, 134, + 85, 152, 5, 74, 27, 175, 83, 162, 148, 17, 110, 201, 74, + ], + y: vec![ + 12, 132, 236, 56, 147, 6, 176, 135, 244, 166, 21, 18, 87, 76, 122, 3, 23, 22, 254, + 236, 148, 129, 110, 207, 131, 116, 58, 170, 4, 130, 191, 157, + ], + }; + let blob = vec![1u8; 100]; // The actual blob sent was kzg-padded, but Blob::from_bytes_and_pad pads it internally, so we don't need to pad it here.
+ let result = verifier.verify_commitment(commitment, blob); + assert!(result.is_ok()); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + async fn test_verify_merkle_proof() { + let cfg = get_verifier_config(); + let signing_client = create_remote_signing_client(cfg.clone()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, + 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_merkle_proof(cert); + assert!(result.is_ok()); + } + + /// Test the verification of a merkle proof with a mocked verifier.
+ /// To test actual behaviour of the verifier, run the test above + #[tokio::test] + async fn test_verify_merkle_proof_mocked() { + let cfg = get_verifier_config(); + let signing_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, + 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_merkle_proof(cert); + assert!(result.is_ok()); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + async fn test_hash_blob_header() { + let cfg = get_verifier_config(); + let signing_client = create_remote_signing_client(cfg.clone()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let blob_header = BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, + ], + }, + data_length: 2, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + ], + }; + let result = verifier.hash_encode_blob_header(blob_header); + let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; + assert_eq!(result, hex::decode(expected).unwrap()); + } + + /// Test hashing of a blob header with a mocked verifier. + /// To test actual behaviour of the verifier, run the test above + #[tokio::test] + async fn test_hash_blob_header_mocked() { + let cfg = get_verifier_config(); + let signing_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let blob_header = BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, + ], + }, + data_length: 2, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + ], + }; + let result = verifier.hash_encode_blob_header(blob_header); + let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; + assert_eq!(result, hex::decode(expected).unwrap()); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + async fn test_inclusion_proof() { + let cfg = get_verifier_config(); + let signing_client = create_remote_signing_client(cfg.clone()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let proof = hex::decode("c455c1ea0e725d7ea3e5f29e9f48be8fc2787bb0a914d5a86710ba302c166ac4f626d76f67f1055bb960a514fb8923af2078fd84085d712655b58a19612e8cd15c3e4ac1cef57acde3438dbcf63f47c9fefe1221344c4d5c1a4943dd0d1803091ca81a270909dc0e146841441c9bd0e08e69ce6168181a3e4060ffacf3627480bec6abdd8d7bb92b49d33f180c42f49e041752aaded9c403db3a17b85e48a11e9ea9a08763f7f383dab6d25236f1b77c12b4c49c5cdbcbea32554a604e3f1d2f466851cb43fe73617b3d01e665e4c019bf930f92dea7394c25ed6a1e200d051fb0c30a2193c459f1cfef00bf1ba6656510d16725a4d1dc031cb759dbc90bab427b0f60ddc6764681924dda848824605a4f08b7f526fe6bd4572458c94e83fbf2150f2eeb28d3011ec921996dc3e69efa52d5fcf3182b20b56b5857a926aa66605808079b4d52c0c0cfe06923fa92e65eeca2c3e6126108e8c1babf5ac522f4d7").unwrap(); + let leaf = hex::decode("f6106e6ae4631e68abe0fa898cedbe97dbae6c7efb1b088c5aa2e8b91190ff96") + .unwrap(); + let expected_root = + hex::decode("7390b8023db8248123dcaeca57fa6c9340bef639e204f2278fc7ec3d46ad071b") + .unwrap(); + + let actual_root = verifier + .process_inclusion_proof(&proof, &leaf, 580) + .unwrap(); + + assert_eq!(actual_root, expected_root); + } + + /// Test proof inclusion with a mocked verifier. 
+ /// To test actual behaviour of the verifier, run the test above + #[tokio::test] + async fn test_inclusion_proof_mocked() { + let cfg = get_verifier_config(); + let signing_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let proof = hex::decode("c455c1ea0e725d7ea3e5f29e9f48be8fc2787bb0a914d5a86710ba302c166ac4f626d76f67f1055bb960a514fb8923af2078fd84085d712655b58a19612e8cd15c3e4ac1cef57acde3438dbcf63f47c9fefe1221344c4d5c1a4943dd0d1803091ca81a270909dc0e146841441c9bd0e08e69ce6168181a3e4060ffacf3627480bec6abdd8d7bb92b49d33f180c42f49e041752aaded9c403db3a17b85e48a11e9ea9a08763f7f383dab6d25236f1b77c12b4c49c5cdbcbea32554a604e3f1d2f466851cb43fe73617b3d01e665e4c019bf930f92dea7394c25ed6a1e200d051fb0c30a2193c459f1cfef00bf1ba6656510d16725a4d1dc031cb759dbc90bab427b0f60ddc6764681924dda848824605a4f08b7f526fe6bd4572458c94e83fbf2150f2eeb28d3011ec921996dc3e69efa52d5fcf3182b20b56b5857a926aa66605808079b4d52c0c0cfe06923fa92e65eeca2c3e6126108e8c1babf5ac522f4d7").unwrap(); + let leaf = hex::decode("f6106e6ae4631e68abe0fa898cedbe97dbae6c7efb1b088c5aa2e8b91190ff96") + .unwrap(); + let expected_root = + hex::decode("7390b8023db8248123dcaeca57fa6c9340bef639e204f2278fc7ec3d46ad071b") + .unwrap(); + + let actual_root = verifier + .process_inclusion_proof(&proof, &leaf, 580) + .unwrap(); + + assert_eq!(actual_root, expected_root); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + async fn test_verify_batch() { + let cfg = get_verifier_config(); + let signing_client = create_remote_signing_client(cfg.clone()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 
136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, + 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_batch(cert).await; + assert!(result.is_ok()); + } + + /// Test batch verification with a mocked verifier. + /// To test actual behaviour of the verifier, run the test above + #[tokio::test] + async fn test_verify_batch_mocked() { + let mut mock_replies = HashMap::new(); + let mock_req = CallRequest { + from: None, + to: Some(H160::from_str("0xd4a7e1bd8015057293f0d0a557088c286942e84b").unwrap()), + gas: None, + gas_price: None, + value: None, + data: Some(Bytes::from( + hex::decode( + "eccbbfc900000000000000000000000000000000000000000000000000000000000103cb", + ) + .unwrap(), + )), + transaction_type: None, + access_list: None, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + }; + let mock_req = serde_json::to_string(&mock_req).unwrap(); + let mock_res = Bytes::from( + hex::decode("60933e76989e57d6fd210ae2fc3086958d708660ee6927f91963047ab1a91ba8") + .unwrap(), + ); + mock_replies.insert(mock_req, mock_res); + + let cfg = get_verifier_config(); + let signing_client = MockVerifierClient::new(mock_replies); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 
237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, + 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_batch(cert).await; + assert!(result.is_ok()); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + async fn test_verify_security_params() { + let cfg = get_verifier_config(); + let signing_client = create_remote_signing_client(cfg.clone()); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177,
+ 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_security_params(cert).await; + assert!(result.is_ok()); + } + + /// Test security params verification with a mocked verifier. + /// To test actual behaviour of the verifier, run the test above + #[tokio::test] + async fn test_verify_security_params_mocked() { + let mut mock_replies = HashMap::new(); + + // First request + let mock_req = CallRequest { + from: None, + to: Some(H160::from_str("0xd4a7e1bd8015057293f0d0a557088c286942e84b").unwrap()), + gas: None, + gas_price: None, + value: None, + data: Some(Bytes::from(hex::decode("8687feae").unwrap())), + transaction_type: None, + access_list: None, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + }; + let mock_req = serde_json::to_string(&mock_req).unwrap(); + let mock_res = Bytes::from( + hex::decode("000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020001000000000000000000000000000000000000000000000000000000000000") + .unwrap(), + ); + mock_replies.insert(mock_req, mock_res); + + // Second request + let mock_req = CallRequest { + from: None, + to: Some(H160::from_str("0xd4a7e1bd8015057293f0d0a557088c286942e84b").unwrap()), + gas: None, + gas_price: None, + value: None, + data: Some(Bytes::from(hex::decode("e15234ff").unwrap())), + transaction_type: None, + access_list: None, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + }; + let mock_req = serde_json::to_string(&mock_req).unwrap(); + let mock_res = Bytes::from( + hex::decode("000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020001000000000000000000000000000000000000000000000000000000000000") + .unwrap(), + ); + mock_replies.insert(mock_req, mock_res); + + let cfg = get_verifier_config(); + let signing_client = MockVerifierClient::new(mock_replies); + let verifier = Verifier::new(cfg, signing_client).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + 
signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, + 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_security_params(cert).await; + assert!(result.is_ok()); + } +} diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs index 2cdde9951be9..2f46419f7972 100644 --- a/core/node/da_dispatcher/src/da_dispatcher.rs +++ b/core/node/da_dispatcher/src/da_dispatcher.rs @@ -83,6 +83,7 @@ impl DataAvailabilityDispatcher { for batch in batches { let dispatch_latency = METRICS.blob_dispatch_latency.start(); + METRICS.blobs_pending_dispatch.inc_by(1); let dispatch_response = retry(self.config.max_retries(), batch.l1_batch_number, || { self.client .dispatch_blob(batch.l1_batch_number.0, batch.pubdata.clone()) @@ -113,6 +114,9 @@ impl DataAvailabilityDispatcher { .last_dispatched_l1_batch .set(batch.l1_batch_number.0 as usize); METRICS.blob_size.observe(batch.pubdata.len()); + METRICS.blobs_dispatched.inc_by(1); + METRICS.blobs_pending_dispatch.dec_by(1); + tracing::info!( "Dispatched a DA for batch_number: {}, pubdata_size: {}, dispatch_latency: {dispatch_latency_duration:?}", batch.l1_batch_number, @@ -170,6 +174,7 @@ impl DataAvailabilityDispatcher { METRICS .last_included_l1_batch .set(blob_info.l1_batch_number.0 as usize); + METRICS.blobs_included.inc_by(1); tracing::info!( "Received an inclusion data for a batch_number: {}, inclusion_latency_seconds: {}", diff --git a/core/node/da_dispatcher/src/metrics.rs b/core/node/da_dispatcher/src/metrics.rs index 67ac5ed68222..4c21e556abe1 100644 --- a/core/node/da_dispatcher/src/metrics.rs +++ b/core/node/da_dispatcher/src/metrics.rs @@ -19,6 +19,12 @@ pub(super) struct DataAvailabilityDispatcherMetrics { /// Buckets are bytes ranging from 1 KB to 16 MB, which has to satisfy all blob size values. #[metrics(buckets = Buckets::exponential(1_024.0..=16.0 * 1_024.0 * 1_024.0, 2.0), unit = Unit::Bytes)] pub blob_size: Histogram<usize>, + /// Number of blobs pending dispatch. + pub blobs_pending_dispatch: Gauge<usize>, + /// Total number of blobs dispatched.
+ pub blobs_dispatched: Gauge<usize>, + /// Total number of blobs included. + pub blobs_included: Gauge<usize>, /// Number of transactions resent by the DA dispatcher. #[metrics(buckets = Buckets::linear(0.0..=10.0, 1.0))] diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index aab6d2e43d76..53bdc24c5ba4 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -131,6 +131,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { local_root: Some(H256::default()), aggregation_root: Some(H256::default()), da_inclusion_data: Some(vec![]), + da_blob_id: Some(vec![]), } } diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs index d5391ee433f9..79abf5d0deee 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -1,9 +1,14 @@ use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; use zksync_da_client::DataAvailabilityClient; -use zksync_da_clients::eigen::EigenClient; +use zksync_da_clients::eigen::{EigenClient, GetBlobData}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_node_framework_derive::FromContext; use crate::{ - implementations::resources::da_client::DAClientResource, + implementations::resources::{ + da_client::DAClientResource, + pools::{MasterPool, PoolResource}, + }, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; @@ -20,6 +25,12 @@ impl EigenWiringLayer { } } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource<MasterPool>, +} + #[derive(Debug, IntoContext)] #[context(crate = crate)] pub struct Output { @@ -28,19 +39,42 @@ pub struct Output { #[async_trait::async_trait] impl WiringLayer for EigenWiringLayer { - type Input = (); + type Input = Input; type Output = Output; fn layer_name(&self) -> &'static str { "eigen_client_layer" } - async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { - let client: Box<dyn DataAvailabilityClient> = - Box::new(EigenClient::new(self.config, self.secrets).await?); + async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> { + let master_pool = input.master_pool.get_custom(2).await?; + let get_blob_from_db = GetBlobFromDB { pool: master_pool }; + let client: Box<dyn DataAvailabilityClient> = Box::new( + EigenClient::new(self.config, self.secrets, Box::new(get_blob_from_db)).await?, + ); Ok(Self::Output { client: DAClientResource(client), }) } } + +#[derive(Debug, Clone)] +pub struct GetBlobFromDB { + pool: ConnectionPool<Core>, +} + +#[async_trait::async_trait] +impl GetBlobData for GetBlobFromDB { + async fn call(&self, input: &'_ str) -> anyhow::Result<Option<Vec<u8>>> { + let pool = self.pool.clone(); + let input = input.to_string(); + let mut conn = pool.connection_tagged("da_dispatcher").await?; + let batch = conn + .data_availability_dal() + .get_blob_data_by_blob_id(&input) + .await?; + drop(conn); + Ok(batch.map(|b| b.pubdata)) + } +}
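Editor's aside — an illustrative sketch, not part of the changeset: GetBlobFromDB is just one implementor of the GetBlobData trait the client consumes, so any other blob source can be swapped in at wiring time. A hypothetical in-memory implementation, e.g. for tests; the struct and its map are invented here, and it assumes `call` is the trait's only required method, mirroring the signature used above:

use std::collections::HashMap;

use zksync_da_clients::eigen::GetBlobData;

// Hypothetical in-memory blob source keyed by blob id.
#[derive(Debug, Clone, Default)]
pub struct InMemoryBlobSource {
    blobs: HashMap<String, Vec<u8>>,
}

#[async_trait::async_trait]
impl GetBlobData for InMemoryBlobSource {
    async fn call(&self, input: &'_ str) -> anyhow::Result<Option<Vec<u8>>> {
        // No database round trip: just look the blob up in the map.
        Ok(self.blobs.get(input).cloned())
    }
}

diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index ac900e72bb6b..2b44b507dc13 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -102,6 +102,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { local_root: Some(H256::zero()), aggregation_root: Some(H256::zero()), da_inclusion_data: Some(vec![]), + da_blob_id: Some(vec![]), } } diff --git a/get_all_blobs/.gitignore b/get_all_blobs/.gitignore new file mode 100644 index 000000000000..a1ee59a11803 ---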
/dev/null +++ b/get_all_blobs/.gitignore @@ -0,0 +1 @@ +blob_data.json diff --git a/get_all_blobs/Cargo.toml b/get_all_blobs/Cargo.toml new file mode 100644 index 000000000000..d629650f6ff1 --- /dev/null +++ b/get_all_blobs/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "get_all_blobs" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +anyhow.workspace = true +tokio = { version = "1" , features = ["full"] } +axum.workspace = true +rustls.workspace = true +rlp.workspace = true +hex.workspace = true + +reqwest.workspace = true +serde = { version = "1.0", features = ["derive"] } +serde_json.workspace = true + +tonic = { version = "0.12.1", features = ["tls", "channel", "tls-roots"]} +prost = "0.13.1" +kzgpad-rs = { git = "https://github.com/Layr-Labs/kzgpad-rs.git", tag = "v0.1.0" } +alloy = { version = "0.3", features = ["full"] } +futures = "0.3" diff --git a/get_all_blobs/abi/commitBatchesSharedBridge.json b/get_all_blobs/abi/commitBatchesSharedBridge.json new file mode 100644 index 000000000000..877ce399c1c6 --- /dev/null +++ b/get_all_blobs/abi/commitBatchesSharedBridge.json @@ -0,0 +1,119 @@ +[ + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBatchInfo", + "name": "", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "newStateRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "bootloaderHeapInitialContentsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "eventsQueueStateHash", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "systemLogs", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "pubdataCommitments", + "type": "bytes" + } + ], + "internalType": "struct IExecutor.CommitBatchInfo[]", + "name": "_newBatchesData", + "type": "tuple[]" + } + ], + "name": "commitBatchesSharedBridge", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/get_all_blobs/src/blob_info.rs b/get_all_blobs/src/blob_info.rs new file mode 100644 index 000000000000..caefe3391deb --- /dev/null +++ b/get_all_blobs/src/blob_info.rs @@ -0,0 +1,504 @@ +use 
+use std::fmt;
+
+use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream};
+
+use crate::generated::{
+    common::G1Commitment as DisperserG1Commitment,
+    disperser::{
+        BatchHeader as DisperserBatchHeader, BatchMetadata as DisperserBatchMetadata,
+        BlobHeader as DisperserBlobHeader, BlobInfo as DisperserBlobInfo,
+        BlobQuorumParam as DisperserBlobQuorumParam,
+        BlobVerificationProof as DisperserBlobVerificationProof,
+    },
+};
+
+#[derive(Debug)]
+pub enum ConversionError {
+    NotPresentError,
+}
+
+impl fmt::Display for ConversionError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ConversionError::NotPresentError => write!(f, "Failed to convert BlobInfo"),
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct G1Commitment {
+    pub x: Vec<u8>,
+    pub y: Vec<u8>,
+}
+
+impl G1Commitment {
+    /// Length-prefixed big-endian byte encoding of both coordinates.
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.x.len().to_be_bytes());
+        bytes.extend(&self.x);
+        bytes.extend(&self.y.len().to_be_bytes());
+        bytes.extend(&self.y);
+
+        bytes
+    }
+}
+
+impl Decodable for G1Commitment {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let x: Vec<u8> = rlp.val_at(0)?; // Decode first element as Vec<u8>
+        let y: Vec<u8> = rlp.val_at(1)?; // Decode second element as Vec<u8>
+
+        Ok(G1Commitment { x, y })
+    }
+}
+
+impl Encodable for G1Commitment {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(2);
+        s.append(&self.x);
+        s.append(&self.y);
+    }
+}
+
+impl From<DisperserG1Commitment> for G1Commitment {
+    fn from(value: DisperserG1Commitment) -> Self {
+        Self {
+            x: value.x,
+            y: value.y,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BlobQuorumParam {
+    pub quorum_number: u32,
+    pub adversary_threshold_percentage: u32,
+    pub confirmation_threshold_percentage: u32,
+    pub chunk_length: u32,
+}
+
+impl BlobQuorumParam {
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.quorum_number.to_be_bytes());
+        bytes.extend(&self.adversary_threshold_percentage.to_be_bytes());
+        bytes.extend(&self.confirmation_threshold_percentage.to_be_bytes());
+        bytes.extend(&self.chunk_length.to_be_bytes());
+
+        bytes
+    }
+}
+
+impl Decodable for BlobQuorumParam {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobQuorumParam {
+            quorum_number: rlp.val_at(0)?,
+            adversary_threshold_percentage: rlp.val_at(1)?,
+            confirmation_threshold_percentage: rlp.val_at(2)?,
+            chunk_length: rlp.val_at(3)?,
+        })
+    }
+}
+
+impl Encodable for BlobQuorumParam {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(4);
+        s.append(&self.quorum_number);
+        s.append(&self.adversary_threshold_percentage);
+        s.append(&self.confirmation_threshold_percentage);
+        s.append(&self.chunk_length);
+    }
+}
+
+impl From<DisperserBlobQuorumParam> for BlobQuorumParam {
+    fn from(value: DisperserBlobQuorumParam) -> Self {
+        Self {
+            quorum_number: value.quorum_number,
+            adversary_threshold_percentage: value.adversary_threshold_percentage,
+            confirmation_threshold_percentage: value.confirmation_threshold_percentage,
+            chunk_length: value.chunk_length,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BlobHeader {
+    pub commitment: G1Commitment,
+    pub data_length: u32,
+    pub blob_quorum_params: Vec<BlobQuorumParam>,
+}
+
+impl BlobHeader {
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(self.commitment.to_bytes());
+        bytes.extend(&self.data_length.to_be_bytes());
+        bytes.extend(&self.blob_quorum_params.len().to_be_bytes());
+
+        for quorum in &self.blob_quorum_params {
+            bytes.extend(quorum.to_bytes());
+        }
+
+        bytes
+    }
+}
+
+impl Decodable for BlobHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let commitment: G1Commitment = rlp.val_at(0)?;
+        let data_length: u32 = rlp.val_at(1)?;
+        let blob_quorum_params: Vec<BlobQuorumParam> = rlp.list_at(2)?;
+
+        Ok(BlobHeader {
+            commitment,
+            data_length,
+            blob_quorum_params,
+        })
+    }
+}
+
+impl Encodable for BlobHeader {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(3);
+        s.append(&self.commitment);
+        s.append(&self.data_length);
+        s.append_list(&self.blob_quorum_params);
+    }
+}
+
+impl TryFrom<DisperserBlobHeader> for BlobHeader {
+    type Error = ConversionError;
+    fn try_from(value: DisperserBlobHeader) -> Result<Self, Self::Error> {
+        if value.commitment.is_none() {
+            return Err(ConversionError::NotPresentError);
+        }
+        let blob_quorum_params: Vec<BlobQuorumParam> = value
+            .blob_quorum_params
+            .iter()
+            .map(|param| BlobQuorumParam::from(param.clone()))
+            .collect();
+        Ok(Self {
+            commitment: G1Commitment::from(value.commitment.unwrap()),
+            data_length: value.data_length,
+            blob_quorum_params,
+        })
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BatchHeader {
+    pub batch_root: Vec<u8>,
+    pub quorum_numbers: Vec<u8>,
+    pub quorum_signed_percentages: Vec<u8>,
+    pub reference_block_number: u32,
+}
+
+impl BatchHeader {
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.batch_root.len().to_be_bytes());
+        bytes.extend(&self.batch_root);
+        bytes.extend(&self.quorum_numbers.len().to_be_bytes());
+        bytes.extend(&self.quorum_numbers);
+        bytes.extend(&self.quorum_signed_percentages.len().to_be_bytes());
+        bytes.extend(&self.quorum_signed_percentages);
+        bytes.extend(&self.reference_block_number.to_be_bytes());
+
+        bytes
+    }
+}
+
+impl Decodable for BatchHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BatchHeader {
+            batch_root: rlp.val_at(0)?,
+            quorum_numbers: rlp.val_at(1)?,
+            quorum_signed_percentages: rlp.val_at(2)?,
+            reference_block_number: rlp.val_at(3)?,
+        })
+    }
+}
+
+impl Encodable for BatchHeader {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(4);
+        s.append(&self.batch_root);
+        s.append(&self.quorum_numbers);
+        s.append(&self.quorum_signed_percentages);
+        s.append(&self.reference_block_number);
+    }
+}
+
+impl From<DisperserBatchHeader> for BatchHeader {
+    fn from(value: DisperserBatchHeader) -> Self {
+        Self {
+            batch_root: value.batch_root,
+            quorum_numbers: value.quorum_numbers,
+            quorum_signed_percentages: value.quorum_signed_percentages,
+            reference_block_number: value.reference_block_number,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BatchMetadata {
+    pub batch_header: BatchHeader,
+    pub signatory_record_hash: Vec<u8>,
+    pub fee: Vec<u8>,
+    pub confirmation_block_number: u32,
+    pub batch_header_hash: Vec<u8>,
+}
+
+impl BatchMetadata {
+    /// Note: serializes only the header, signatory record hash and
+    /// confirmation block number, not the fee or batch header hash.
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(self.batch_header.to_bytes());
+        bytes.extend(&self.signatory_record_hash);
+        bytes.extend(&self.confirmation_block_number.to_be_bytes());
+
+        bytes
+    }
+}
+
+impl Decodable for BatchMetadata {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let batch_header: BatchHeader = rlp.val_at(0)?;
+
+        Ok(BatchMetadata {
+            batch_header,
+            signatory_record_hash: rlp.val_at(1)?,
+            fee: rlp.val_at(2)?,
+            confirmation_block_number: rlp.val_at(3)?,
+            batch_header_hash: rlp.val_at(4)?,
+        })
+    }
+}
+
+impl Encodable for BatchMetadata {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(5);
+        s.append(&self.batch_header);
+        s.append(&self.signatory_record_hash);
+        s.append(&self.fee);
+        s.append(&self.confirmation_block_number);
+        s.append(&self.batch_header_hash);
+    }
+}
+
+impl TryFrom<DisperserBatchMetadata> for BatchMetadata {
+    type Error = ConversionError;
+    fn try_from(value: DisperserBatchMetadata) -> Result<Self, Self::Error> {
+        if value.batch_header.is_none() {
+            return Err(ConversionError::NotPresentError);
+        }
+        Ok(Self {
+            batch_header: BatchHeader::from(value.batch_header.unwrap()),
+            signatory_record_hash: value.signatory_record_hash,
+            fee: value.fee,
+            confirmation_block_number: value.confirmation_block_number,
+            batch_header_hash: value.batch_header_hash,
+        })
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BlobVerificationProof {
+    pub batch_id: u32,
+    pub blob_index: u32,
+    pub batch_metadata: BatchMetadata,
+    pub inclusion_proof: Vec<u8>,
+    pub quorum_indexes: Vec<u8>,
+}
+
+impl BlobVerificationProof {
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.batch_id.to_be_bytes());
+        bytes.extend(&self.blob_index.to_be_bytes());
+        bytes.extend(self.batch_metadata.to_bytes());
+        bytes.extend(&self.inclusion_proof.len().to_be_bytes());
+        bytes.extend(&self.inclusion_proof);
+        bytes.extend(&self.quorum_indexes.len().to_be_bytes());
+        bytes.extend(&self.quorum_indexes);
+
+        bytes
+    }
+}
+
+impl Decodable for BlobVerificationProof {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobVerificationProof {
+            batch_id: rlp.val_at(0)?,
+            blob_index: rlp.val_at(1)?,
+            batch_metadata: rlp.val_at(2)?,
+            inclusion_proof: rlp.val_at(3)?,
+            quorum_indexes: rlp.val_at(4)?,
+        })
+    }
+}
+
+impl Encodable for BlobVerificationProof {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(5);
+        s.append(&self.batch_id);
+        s.append(&self.blob_index);
+        s.append(&self.batch_metadata);
+        s.append(&self.inclusion_proof);
+        s.append(&self.quorum_indexes);
+    }
+}
+
+impl TryFrom<DisperserBlobVerificationProof> for BlobVerificationProof {
+    type Error = ConversionError;
+    fn try_from(value: DisperserBlobVerificationProof) -> Result<Self, Self::Error> {
+        if value.batch_metadata.is_none() {
+            return Err(ConversionError::NotPresentError);
+        }
+        Ok(Self {
+            batch_id: value.batch_id,
+            blob_index: value.blob_index,
+            batch_metadata: BatchMetadata::try_from(value.batch_metadata.unwrap())?,
+            inclusion_proof: value.inclusion_proof,
+            quorum_indexes: value.quorum_indexes,
+        })
+    }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BlobInfo {
+    pub blob_header: BlobHeader,
+    pub blob_verification_proof: BlobVerificationProof,
+}
+
+impl BlobInfo {
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        let blob_header_bytes = self.blob_header.to_bytes();
+        bytes.extend(blob_header_bytes.len().to_be_bytes());
+        bytes.extend(blob_header_bytes);
+        let blob_verification_proof_bytes = self.blob_verification_proof.to_bytes();
+        bytes.extend(blob_verification_proof_bytes);
+        bytes
+    }
+}
+
+impl Decodable for BlobInfo {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let blob_header: BlobHeader = rlp.val_at(0)?;
+        let blob_verification_proof: BlobVerificationProof = rlp.val_at(1)?;
+
+        Ok(BlobInfo {
+            blob_header,
+            blob_verification_proof,
+        })
+    }
+}
+
+impl Encodable for BlobInfo {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(2);
+        s.append(&self.blob_header);
+        s.append(&self.blob_verification_proof);
+    }
+}
+
+impl TryFrom<DisperserBlobInfo> for BlobInfo {
+    type Error = ConversionError;
+    fn try_from(value: DisperserBlobInfo) -> Result<Self, Self::Error> {
+        if value.blob_header.is_none() || value.blob_verification_proof.is_none() {
+            return Err(ConversionError::NotPresentError);
+        }
+        Ok(Self {
+            blob_header: BlobHeader::try_from(value.blob_header.unwrap())?,
+            blob_verification_proof: BlobVerificationProof::try_from(
+                value.blob_verification_proof.unwrap(),
+            )?,
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_blob_info_encoding_and_decoding() {
+        let blob_info = BlobInfo {
+            blob_header: BlobHeader {
+                commitment: G1Commitment {
+                    x: vec![
+                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                        0, 0, 0, 0, 0, 0, 0,
+                    ],
+                    y: vec![
+                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                        0, 0, 0, 0, 0, 0, 0,
+                    ],
+                },
+                data_length: 4,
+                blob_quorum_params: vec![
+                    BlobQuorumParam {
+                        quorum_number: 0,
+                        adversary_threshold_percentage: 33,
+                        confirmation_threshold_percentage: 55,
+                        chunk_length: 1,
+                    },
+                    BlobQuorumParam {
+                        quorum_number: 1,
+                        adversary_threshold_percentage: 33,
+                        confirmation_threshold_percentage: 55,
+                        chunk_length: 1,
+                    },
+                ],
+            },
+            blob_verification_proof: BlobVerificationProof {
+                batch_id: 66507,
+                blob_index: 92,
+                batch_metadata: BatchMetadata {
+                    batch_header: BatchHeader {
+                        batch_root: vec![
+                            179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216,
+                            8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248,
+                            211, 43,
+                        ],
+                        quorum_numbers: vec![0, 1],
+                        quorum_signed_percentages: vec![100, 100],
+                        reference_block_number: 2624794,
+                    },
+                    signatory_record_hash: vec![
+                        172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17,
+                        107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133,
+                    ],
+                    fee: vec![0],
+                    confirmation_block_number: 2624876,
+                    batch_header_hash: vec![
+                        122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1,
+                        146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177,
+                    ],
+                },
+                inclusion_proof: vec![
+                    203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190,
+                    140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4,
+                    125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138,
+                    157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137,
+                    128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145,
+                    5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135,
+                    166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84,
+                    156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177,
+                    182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162,
+                    253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149,
+                    202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148,
+                    198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179,
+                    183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105,
+                    245, 84, 244, 196,
+                ],
+                quorum_indexes: vec![0, 1],
+            },
+        };
+
+        let encoded_blob_info = rlp::encode(&blob_info);
+        let decoded_blob_info: BlobInfo = rlp::decode(&encoded_blob_info).unwrap();
+
+        assert_eq!(blob_info, decoded_blob_info);
+    }
+}
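Because `BlobInfo` implements both `Encodable` and `Decodable`, a hex-encoded RLP commitment can be turned back into a structured value in one call. A minimal sketch, assuming a crate-local context where `blob_info` is in scope; `parse_commitment` and `commitment_hex` are hypothetical names, not part of this PR:

```rust
// Sketch: round-trip a hex-encoded, RLP-serialized BlobInfo.
fn parse_commitment(commitment_hex: &str) -> anyhow::Result<blob_info::BlobInfo> {
    // Hex string -> raw bytes (would fail on odd length / non-hex chars).
    let bytes = hex::decode(commitment_hex)?;
    // Raw bytes -> BlobInfo via the Decodable impl above.
    rlp::decode(&bytes).map_err(|e| anyhow::anyhow!("invalid RLP: {}", e))
}
```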
diff --git a/get_all_blobs/src/client.rs b/get_all_blobs/src/client.rs
new file mode 100644
index 000000000000..f7da1a1ca70d
--- /dev/null
+++ b/get_all_blobs/src/client.rs
@@ -0,0 +1,84 @@
+use std::str::FromStr;
+
+use tonic::transport::{Channel, ClientTlsConfig, Endpoint};
+
+use crate::{
+    blob_info::BlobInfo,
+    generated::disperser::{self, disperser_client::DisperserClient},
+};
+
+#[derive(Debug, Clone)]
+pub struct EigenClientRetriever {
+    client: DisperserClient<Channel>,
+}
+
+impl EigenClientRetriever {
+    pub async fn new(disperser_rpc: &str) -> anyhow::Result<Self> {
+        let endpoint = Endpoint::from_str(disperser_rpc)?.tls_config(ClientTlsConfig::new())?;
+        let client = DisperserClient::connect(endpoint)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?;
+
+        Ok(EigenClientRetriever { client })
+    }
+
+    pub async fn get_blob_data(&self, blob_info: BlobInfo) -> anyhow::Result<Option<Vec<u8>>> {
+        let blob_index = blob_info.blob_verification_proof.blob_index;
+        let batch_header_hash = blob_info
+            .blob_verification_proof
+            .batch_metadata
+            .batch_header_hash;
+        let get_response = self
+            .client
+            .clone()
+            .retrieve_blob(disperser::RetrieveBlobRequest {
+                batch_header_hash,
+                blob_index,
+            })
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to retrieve blob: {}", e))?
+            .into_inner();
+
+        if get_response.data.is_empty() {
+            anyhow::bail!("Empty data returned from Disperser");
+        }
+
+        let data = kzgpad_rs::remove_empty_byte_from_padded_bytes(&get_response.data);
+        Ok(Some(data))
+    }
+
+    pub async fn get_blob_status(&self, blob_id: &str) -> anyhow::Result<Option<BlobInfo>> {
+        let polling_request = disperser::BlobStatusRequest {
+            request_id: hex::decode(blob_id)?,
+        };
+
+        let resp = self
+            .client
+            .clone()
+            .get_blob_status(polling_request.clone())
+            .await?
+            .into_inner();
+
+        match disperser::BlobStatus::try_from(resp.status)? {
+            disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => Ok(None),
+            disperser::BlobStatus::Failed => Err(anyhow::anyhow!("Blob dispatch failed")),
+            disperser::BlobStatus::InsufficientSignatures => {
+                Err(anyhow::anyhow!("Insufficient signatures"))
+            }
+            disperser::BlobStatus::Confirmed | disperser::BlobStatus::Finalized => {
+                let blob_info = resp
+                    .info
+                    .ok_or_else(|| anyhow::anyhow!("No blob header in response"))?;
+                Ok(Some(blob_info.try_into().map_err(|e| {
+                    anyhow::anyhow!("Failed to convert blob info: {}", e)
+                })?))
+            }
+            _ => Err(anyhow::anyhow!("Received unknown blob status")),
+        }
+    }
+}
diff --git a/get_all_blobs/src/generated/common.rs b/get_all_blobs/src/generated/common.rs
new file mode 100644
index 000000000000..0599b9af4127
--- /dev/null
+++ b/get_all_blobs/src/generated/common.rs
@@ -0,0 +1,63 @@
+// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct G1Commitment {
+    /// The X coordinate of the KZG commitment. This is the raw byte representation of the field element.
+    #[prost(bytes = "vec", tag = "1")]
+    pub x: ::prost::alloc::vec::Vec<u8>,
+    /// The Y coordinate of the KZG commitment. This is the raw byte representation of the field element.
+    #[prost(bytes = "vec", tag = "2")]
+    pub y: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct G2Commitment {
+    /// The A0 element of the X coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "1")]
+    pub x_a0: ::prost::alloc::vec::Vec<u8>,
+    /// The A1 element of the X coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "2")]
+    pub x_a1: ::prost::alloc::vec::Vec<u8>,
+    /// The A0 element of the Y coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "3")]
+    pub y_a0: ::prost::alloc::vec::Vec<u8>,
+    /// The A1 element of the Y coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "4")]
+    pub y_a1: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobCommitment represents commitment of a specific blob, containing its
+/// KZG commitment, degree proof, the actual degree, and data length in number of symbols.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobCommitment {
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<G1Commitment>,
+    #[prost(message, optional, tag = "2")]
+    pub length_commitment: ::core::option::Option<G2Commitment>,
+    #[prost(message, optional, tag = "3")]
+    pub length_proof: ::core::option::Option<G2Commitment>,
+    #[prost(uint32, tag = "4")]
+    pub data_length: u32,
+}
+/// BlobCertificate is what gets attested by the network
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobCertificate {
+    #[prost(uint32, tag = "1")]
+    pub version: u32,
+    #[prost(bytes = "vec", tag = "2")]
+    pub blob_key: ::prost::alloc::vec::Vec<u8>,
+    #[prost(message, optional, tag = "3")]
+    pub blob_commitment: ::core::option::Option<BlobCommitment>,
+    #[prost(uint32, repeated, tag = "4")]
+    pub quorum_numbers: ::prost::alloc::vec::Vec<u32>,
+    #[prost(uint32, tag = "5")]
+    pub reference_block_number: u32,
+}
+/// A chunk of a blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ChunkData {
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+}
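The retriever above is a two-step API: `get_blob_status` maps the disperser's state machine onto `Ok(None)` (still in flight), `Ok(Some(BlobInfo))` (confirmed or finalized), or `Err` (terminal failure), and `get_blob_data` then fetches the payload. A minimal usage sketch, written as if inside this crate; `fetch_one` is a hypothetical helper, and the endpoint is the Holesky constant used in `main.rs`:

```rust
// Sketch: resolve one blob id to its raw bytes via the retriever.
async fn fetch_one(blob_id: &str) -> anyhow::Result<Vec<u8>> {
    let client = client::EigenClientRetriever::new("https://disperser-holesky.eigenda.xyz:443").await?;
    // Ok(None) means the blob is still PROCESSING/DISPERSING; a real caller
    // might poll here until the status is CONFIRMED or FINALIZED.
    let blob_info = client
        .get_blob_status(blob_id)
        .await?
        .ok_or_else(|| anyhow::anyhow!("blob not confirmed yet"))?;
    client
        .get_blob_data(blob_info)
        .await?
        .ok_or_else(|| anyhow::anyhow!("blob not found"))
}
```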
diff --git a/get_all_blobs/src/generated/disperser.rs b/get_all_blobs/src/generated/disperser.rs
new file mode 100644
index 000000000000..b2ff5edc183c
--- /dev/null
+++ b/get_all_blobs/src/generated/disperser.rs
@@ -0,0 +1,486 @@
+// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticatedRequest {
+    #[prost(oneof = "authenticated_request::Payload", tags = "1, 2")]
+    pub payload: ::core::option::Option<authenticated_request::Payload>,
+}
+/// Nested message and enum types in `AuthenticatedRequest`.
+pub mod authenticated_request {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Payload {
+        #[prost(message, tag = "1")]
+        DisperseRequest(super::DisperseBlobRequest),
+        #[prost(message, tag = "2")]
+        AuthenticationData(super::AuthenticationData),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticatedReply {
+    #[prost(oneof = "authenticated_reply::Payload", tags = "1, 2")]
+    pub payload: ::core::option::Option<authenticated_reply::Payload>,
+}
+/// Nested message and enum types in `AuthenticatedReply`.
+pub mod authenticated_reply {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Payload {
+        #[prost(message, tag = "1")]
+        BlobAuthHeader(super::BlobAuthHeader),
+        #[prost(message, tag = "2")]
+        DisperseReply(super::DisperseBlobReply),
+    }
+}
+/// BlobAuthHeader contains information about the blob for the client to verify and sign.
+/// - Once payments are enabled, the BlobAuthHeader will contain the KZG commitment to the blob, which the client
+/// will verify and sign. Having the client verify the KZG commitment instead of calculating it avoids
+/// the need for the client to have the KZG structured reference string (SRS), which can be large.
+/// The signed KZG commitment prevents the disperser from sending a different blob to the DA Nodes
+/// than the one the client sent.
+/// - In the meantime, the BlobAuthHeader contains a simple challenge parameter that is used to prevent
+/// replay attacks in the event that a signature is leaked.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobAuthHeader {
+    #[prost(uint32, tag = "1")]
+    pub challenge_parameter: u32,
+}
+/// AuthenticationData contains the signature of the BlobAuthHeader.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticationData {
+    #[prost(bytes = "vec", tag = "1")]
+    pub authentication_data: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DisperseBlobRequest {
+    /// The data to be dispersed.
+    /// The size of data must be <= 2MiB. Every 32 bytes of data chunk is interpreted as an integer in big endian format
+    /// where the lower address has more significant bits. The integer must stay in the valid range to be interpreted
+    /// as a field element on the bn254 curve. The valid range is
+    /// 0 <= x < 21888242871839275222246405745257275088548364400416034343698204186575808495617
+    /// containing slightly less than 254 bits and more than 253 bits. If any one of the 32 bytes chunk is outside the range,
+    /// the whole request is deemed as invalid, and rejected.
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+    /// The quorums to which the blob will be sent, in addition to the required quorums which are configured
+    /// on the EigenDA smart contract. If required quorums are included here, an error will be returned.
+    /// The disperser will ensure that the encoded blobs for each quorum are all processed
+    /// within the same batch.
+    #[prost(uint32, repeated, tag = "2")]
+    pub custom_quorum_numbers: ::prost::alloc::vec::Vec<u32>,
+    /// The account ID of the client. This should be a hex-encoded string of the ECDSA public key
+    /// corresponding to the key used by the client to sign the BlobAuthHeader.
+    #[prost(string, tag = "3")]
+    pub account_id: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DisperseBlobReply {
+    /// The status of the blob associated with the request_id.
+    #[prost(enumeration = "BlobStatus", tag = "1")]
+    pub result: i32,
+    /// The request ID generated by the disperser.
+    /// Once a request is accepted (although not processed), a unique request ID will be
+    /// generated.
+    /// Two different DisperseBlobRequests (determined by the hash of the DisperseBlobRequest)
+    /// will have different IDs, and the same DisperseBlobRequest sent repeatedly at different
+    /// times will also have different IDs.
+    /// The client should use this ID to query the processing status of the request (via
+    /// the GetBlobStatus API).
+    #[prost(bytes = "vec", tag = "2")]
+    pub request_id: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobStatusRequest is used to query the status of a blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobStatusRequest {
+    #[prost(bytes = "vec", tag = "1")]
+    pub request_id: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobStatusReply {
+    /// The status of the blob.
+    #[prost(enumeration = "BlobStatus", tag = "1")]
+    pub status: i32,
+    /// The blob info needed for clients to confirm the blob against the EigenDA contracts.
+    #[prost(message, optional, tag = "2")]
+    pub info: ::core::option::Option<BlobInfo>,
+}
+/// RetrieveBlobRequest contains parameters to retrieve the blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetrieveBlobRequest {
+    #[prost(bytes = "vec", tag = "1")]
+    pub batch_header_hash: ::prost::alloc::vec::Vec<u8>,
+    #[prost(uint32, tag = "2")]
+    pub blob_index: u32,
+}
+/// RetrieveBlobReply contains the retrieved blob data
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetrieveBlobReply {
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobInfo contains information needed to confirm the blob against the EigenDA contracts
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobInfo {
+    #[prost(message, optional, tag = "1")]
+    pub blob_header: ::core::option::Option<BlobHeader>,
+    #[prost(message, optional, tag = "2")]
+    pub blob_verification_proof: ::core::option::Option<BlobVerificationProof>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobHeader {
+    /// KZG commitment of the blob.
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<super::common::G1Commitment>,
+    /// The length of the blob in symbols (each symbol is 32 bytes).
+    #[prost(uint32, tag = "2")]
+    pub data_length: u32,
+    /// The params of the quorums that this blob participates in.
+    #[prost(message, repeated, tag = "3")]
+    pub blob_quorum_params: ::prost::alloc::vec::Vec<BlobQuorumParam>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobQuorumParam {
+    /// The ID of the quorum.
+    #[prost(uint32, tag = "1")]
+    pub quorum_number: u32,
+    /// The max percentage of stake within the quorum that can be held by or delegated
+    /// to adversarial operators. Currently, this and the next parameter are standardized
+    /// across the quorum using values read from the EigenDA contracts.
+    #[prost(uint32, tag = "2")]
+    pub adversary_threshold_percentage: u32,
+    /// The min percentage of stake that must attest in order to consider
+    /// the dispersal is successful.
+    #[prost(uint32, tag = "3")]
+    pub confirmation_threshold_percentage: u32,
+    /// The length of each chunk.
+    #[prost(uint32, tag = "4")]
+    pub chunk_length: u32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobVerificationProof {
+    /// batch_id is an incremental ID assigned to a batch by EigenDAServiceManager
+    #[prost(uint32, tag = "1")]
+    pub batch_id: u32,
+    /// The index of the blob in the batch (which is logically an ordered list of blobs).
+    #[prost(uint32, tag = "2")]
+    pub blob_index: u32,
+    #[prost(message, optional, tag = "3")]
+    pub batch_metadata: ::core::option::Option<BatchMetadata>,
+    /// inclusion_proof is a merkle proof for a blob header's inclusion in a batch
+    #[prost(bytes = "vec", tag = "4")]
+    pub inclusion_proof: ::prost::alloc::vec::Vec<u8>,
+    /// indexes of quorums in BatchHeader.quorum_numbers that match the quorums in BlobHeader.blob_quorum_params
+    /// Ex. BlobHeader.blob_quorum_params = [
+    ///     {
+    ///         quorum_number = 0,
+    ///         ...
+    ///     },
+    ///     {
+    ///         quorum_number = 3,
+    ///         ...
+    ///     },
+    ///     {
+    ///         quorum_number = 5,
+    ///         ...
+    ///     },
+    /// ]
+    /// BatchHeader.quorum_numbers = \[0, 5, 3\] => 0x000503
+    /// Then, quorum_indexes = \[0, 2, 1\] => 0x000201
+    #[prost(bytes = "vec", tag = "5")]
+    pub quorum_indexes: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchMetadata {
+    #[prost(message, optional, tag = "1")]
+    pub batch_header: ::core::option::Option<BatchHeader>,
+    /// The hash of all public keys of the operators that did not sign the batch.
+    #[prost(bytes = "vec", tag = "2")]
+    pub signatory_record_hash: ::prost::alloc::vec::Vec<u8>,
+    /// The fee payment paid by users for dispersing this batch. It's the bytes
+    /// representation of a big.Int value.
+    #[prost(bytes = "vec", tag = "3")]
+    pub fee: ::prost::alloc::vec::Vec<u8>,
+    /// The Ethereum block number at which the batch is confirmed onchain.
+    #[prost(uint32, tag = "4")]
+    pub confirmation_block_number: u32,
+    /// This is the hash of the ReducedBatchHeader defined onchain, see:
+    ///
+    /// This is the message that the operators will sign their signatures on.
+    #[prost(bytes = "vec", tag = "5")]
+    pub batch_header_hash: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchHeader {
+    /// The root of the merkle tree with the hashes of blob headers as leaves.
+    #[prost(bytes = "vec", tag = "1")]
+    pub batch_root: ::prost::alloc::vec::Vec<u8>,
+    /// All quorums associated with blobs in this batch. Sorted in ascending order.
+    /// Ex. \[0, 2, 1\] => 0x000102
+    #[prost(bytes = "vec", tag = "2")]
+    pub quorum_numbers: ::prost::alloc::vec::Vec<u8>,
+    /// The percentage of stake that has signed for this batch.
+    /// The quorum_signed_percentages\[i\] is percentage for the quorum_numbers\[i\].
+    #[prost(bytes = "vec", tag = "3")]
+    pub quorum_signed_percentages: ::prost::alloc::vec::Vec<u8>,
+    /// The Ethereum block number at which the batch was created.
+    /// The Disperser will encode and disperse the blobs based on the onchain info
+    /// (e.g. operator stakes) at this block number.
+    #[prost(uint32, tag = "4")]
+    pub reference_block_number: u32,
+}
+/// BlobStatus represents the status of a blob.
+/// The status of a blob is updated as the blob is processed by the disperser.
+/// The status of a blob can be queried by the client using the GetBlobStatus API.
+/// Intermediate states are states that the blob can be in while being processed, and it can be updated to a different state:
+/// - PROCESSING
+/// - DISPERSING
+/// - CONFIRMED
+/// Terminal states are states that will not be updated to a different state:
+/// - FAILED
+/// - FINALIZED
+/// - INSUFFICIENT_SIGNATURES
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum BlobStatus {
+    Unknown = 0,
+    /// PROCESSING means that the blob is currently being processed by the disperser
+    Processing = 1,
+    /// CONFIRMED means that the blob has been dispersed to DA Nodes and the dispersed
+    /// batch containing the blob has been confirmed onchain
+    Confirmed = 2,
+    /// FAILED means that the blob has failed permanently (for reasons other than insufficient
+    /// signatures, which is a separate state)
+    Failed = 3,
+    /// FINALIZED means that the block containing the blob's confirmation transaction has been finalized on Ethereum
+    Finalized = 4,
+    /// INSUFFICIENT_SIGNATURES means that the confirmation threshold for the blob was not met
+    /// for at least one quorum.
+    InsufficientSignatures = 5,
+    /// DISPERSING means that the blob is currently being dispersed to DA Nodes and being confirmed onchain
+    Dispersing = 6,
+}
+impl BlobStatus {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            BlobStatus::Unknown => "UNKNOWN",
+            BlobStatus::Processing => "PROCESSING",
+            BlobStatus::Confirmed => "CONFIRMED",
+            BlobStatus::Failed => "FAILED",
+            BlobStatus::Finalized => "FINALIZED",
+            BlobStatus::InsufficientSignatures => "INSUFFICIENT_SIGNATURES",
+            BlobStatus::Dispersing => "DISPERSING",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "UNKNOWN" => Some(Self::Unknown),
+            "PROCESSING" => Some(Self::Processing),
+            "CONFIRMED" => Some(Self::Confirmed),
+            "FAILED" => Some(Self::Failed),
+            "FINALIZED" => Some(Self::Finalized),
+            "INSUFFICIENT_SIGNATURES" => Some(Self::InsufficientSignatures),
+            "DISPERSING" => Some(Self::Dispersing),
+            _ => None,
+        }
+    }
+}
+/// Generated client implementations.
+pub mod disperser_client {
+    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
+    use tonic::codegen::{http::Uri, *};
+    /// Disperser defines the public APIs for dispersing blobs.
+    #[derive(Debug, Clone)]
+    pub struct DisperserClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl DisperserClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> DisperserClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> DisperserClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
+                Into<StdError> + Send + Sync,
+        {
+            DisperserClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// This API accepts blob to disperse from clients.
+        /// This executes the dispersal async, i.e. it returns once the request
+        /// is accepted. The client could use GetBlobStatus() API to poll the
+        /// processing status of the blob.
+        pub async fn disperse_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::DisperseBlobRequest>,
+        ) -> std::result::Result<tonic::Response<super::DisperseBlobReply>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/disperser.Disperser/DisperseBlob");
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "DisperseBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// DisperseBlobAuthenticated is similar to DisperseBlob, except that it requires the
+        /// client to authenticate itself via the AuthenticationData message. The protocol is as follows:
+        /// 1. The client sends a DisperseBlobAuthenticated request with the DisperseBlobRequest message
+        /// 2. The Disperser sends back a BlobAuthHeader message containing information for the client to
+        ///    verify and sign.
+        /// 3. The client verifies the BlobAuthHeader and sends back the signed BlobAuthHeader in an
+        ///    AuthenticationData message.
+        /// 4. The Disperser verifies the signature and returns a DisperseBlobReply message.
+        pub async fn disperse_blob_authenticated(
+            &mut self,
+            request: impl tonic::IntoStreamingRequest<Message = super::AuthenticatedRequest>,
+        ) -> std::result::Result<
+            tonic::Response<tonic::codec::Streaming<super::AuthenticatedReply>>,
+            tonic::Status,
+        > {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/DisperseBlobAuthenticated",
+            );
+            let mut req = request.into_streaming_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "disperser.Disperser",
+                "DisperseBlobAuthenticated",
+            ));
+            self.inner.streaming(req, path, codec).await
+        }
+        /// This API is meant to be polled for the blob status.
+        pub async fn get_blob_status(
+            &mut self,
+            request: impl tonic::IntoRequest<super::BlobStatusRequest>,
+        ) -> std::result::Result<tonic::Response<super::BlobStatusReply>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/disperser.Disperser/GetBlobStatus");
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "GetBlobStatus"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// This retrieves the requested blob from the Disperser's backend.
+        /// This is a more efficient way to retrieve blobs than directly retrieving
+        /// from the DA Nodes (see detail about this approach in
+        /// api/proto/retriever/retriever.proto).
+        /// The blob should have been initially dispersed via this Disperser service
+        /// for this API to work.
+        pub async fn retrieve_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::RetrieveBlobRequest>,
+        ) -> std::result::Result<tonic::Response<super::RetrieveBlobReply>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/disperser.Disperser/RetrieveBlob");
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "RetrieveBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
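The generated `DisperserClient` can also be used without the `EigenClientRetriever` wrapper when only raw retrieval is needed. A minimal sketch, assuming this crate's module layout; `retrieve_raw` is a hypothetical helper, and the endpoint matches the constant in `main.rs`:

```rust
// Sketch: call the generated gRPC client directly over TLS.
use tonic::transport::{ClientTlsConfig, Endpoint};

async fn retrieve_raw(batch_header_hash: Vec<u8>, blob_index: u32) -> anyhow::Result<Vec<u8>> {
    let endpoint = Endpoint::from_static("https://disperser-holesky.eigenda.xyz:443")
        .tls_config(ClientTlsConfig::new())?;
    let mut client =
        generated::disperser::disperser_client::DisperserClient::connect(endpoint).await?;
    let reply = client
        .retrieve_blob(generated::disperser::RetrieveBlobRequest {
            batch_header_hash,
            blob_index,
        })
        .await?
        .into_inner();
    // Note: still KZG-padded; main.rs strips padding with kzgpad-rs.
    Ok(reply.data)
}
```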
diff --git a/get_all_blobs/src/generated/mod.rs b/get_all_blobs/src/generated/mod.rs
new file mode 100644
index 000000000000..d77a351741d9
--- /dev/null
+++ b/get_all_blobs/src/generated/mod.rs
@@ -0,0 +1,3 @@
+pub(crate) mod common;
+pub(crate) mod disperser;
+// pub(crate) mod eigendaservicemanager;
diff --git a/get_all_blobs/src/main.rs b/get_all_blobs/src/main.rs
new file mode 100644
index 000000000000..33c2c67648fc
--- /dev/null
+++ b/get_all_blobs/src/main.rs
@@ -0,0 +1,170 @@
+use std::{fs, str::FromStr};
+
+use alloy::{
+    dyn_abi::JsonAbiExt,
+    json_abi::JsonAbi,
+    network::Ethereum,
+    primitives::Address,
+    providers::{Provider, RootProvider},
+};
+use client::EigenClientRetriever;
+use serde::{Deserialize, Serialize};
+
+mod blob_info;
+mod client;
+mod generated;
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BlobData {
+    pub blob_id: String,
+    pub blob: String,
+}
+
+const EIGENDA_API_URL: &str = "https://disperser-holesky.eigenda.xyz:443";
+const BLOB_DATA_JSON: &str = "blob_data.json";
+const ABI_JSON: &str = "./abi/commitBatchesSharedBridge.json";
+const COMMIT_BATCHES_SELECTOR: &str = "6edd4f12";
+
+async fn get_blob(commitment: &str) -> anyhow::Result<Vec<u8>> {
+    let client = EigenClientRetriever::new(EIGENDA_API_URL).await?;
+    let blob_id = commitment;
+    let blob_info = client
+        .get_blob_status(blob_id)
+        .await?
+        .ok_or_else(|| anyhow::anyhow!("Blob not found"))?;
+    let data = client
+        .get_blob_data(blob_info)
+        .await?
+        .ok_or_else(|| anyhow::anyhow!("Blob not found"))?;
+
+    Ok(data)
+}
+
+async fn get_transactions(
+    provider: &RootProvider<
+        alloy::transports::http::Http<alloy::transports::http::Client>,
+        Ethereum,
+    >,
+    validator_timelock_address: Address,
+    block_start: u64,
+) -> anyhow::Result<()> {
+    let latest_block = provider.get_block_number().await?;
+    let mut json_array = Vec::new();
+
+    let mut i = 0;
+    for block_number in block_start..=latest_block {
+        i += 1;
+        if i % 50 == 0 {
+            println!(
+                "\x1b[32mProcessed up to block {} of {}\x1b[0m",
+                block_number, latest_block
+            );
+        }
+        if let Ok(Some(block)) = provider
+            .get_block_by_number(block_number.into(), true)
+            .await
+        {
+            for tx in block.transactions.into_transactions() {
+                if let Some(to) = tx.to {
+                    if to == validator_timelock_address {
+                        let input = tx.input;
+                        // Skip transactions whose calldata is too short to hold a selector.
+                        if input.len() < 4 {
+                            continue;
+                        }
+                        let selector = &input[0..4];
+                        if selector == hex::decode(COMMIT_BATCHES_SELECTOR)? {
+                            if let Ok(decoded) = decode_blob_data_input(&input[4..]).await {
+                                for blob in decoded {
+                                    json_array.push(blob);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    if json_array.is_empty() {
+        println!("\x1b[31mNo transactions found.\x1b[0m");
+        return Ok(());
+    }
+
+    let json_string = serde_json::to_string_pretty(&json_array)?;
+    fs::write(BLOB_DATA_JSON, json_string)?;
+    println!("\x1b[32mData stored in blob_data.json file.\x1b[0m");
+
+    Ok(())
+}
+
+async fn decode_blob_data_input(input: &[u8]) -> anyhow::Result<Vec<BlobData>> {
+    let json = std::fs::read_to_string(ABI_JSON)?;
+    let json_abi: JsonAbi = serde_json::from_str(&json)?;
+    let function = json_abi
+        .functions
+        .iter()
+        .find(|f| f.0 == "commitBatchesSharedBridge")
+        .ok_or(anyhow::anyhow!("Function not found"))?
+        .1;
+
+    let decoded = function[0].abi_decode_input(input, true)?;
+    let commit_batch_info = decoded[2].as_array().ok_or(anyhow::anyhow!(
+        "CommitBatchInfo cannot be represented as an array"
+    ))?[0]
+        .as_tuple()
+        .ok_or(anyhow::anyhow!(
+            "CommitBatchInfo components cannot be represented as a tuple"
+        ))?;
+
+    let mut blobs = vec![];
+
+    for pubdata_commitments in commit_batch_info.iter() {
+        let pubdata_commitments_bytes = pubdata_commitments.as_bytes();
+        if let Ok(blob_data) = get_blob_from_pubdata_commitment(pubdata_commitments_bytes).await {
+            blobs.push(blob_data)
+        }
+    }
+
+    Ok(blobs)
+}
+
+async fn get_blob_from_pubdata_commitment(
+    pubdata_commitments_bytes: Option<&[u8]>,
+) -> anyhow::Result<BlobData> {
+    if pubdata_commitments_bytes.is_none() {
+        return Err(anyhow::anyhow!(
+            "Pubdata commitments cannot be represented as bytes"
+        ));
+    }
+    let pubdata_commitments_bytes = pubdata_commitments_bytes.unwrap();
+    let blob_id = hex::decode(&pubdata_commitments_bytes[1..])?;
+    let blob_id = hex::encode(&blob_id);
+    let blob = get_blob(&blob_id).await?;
+    Ok(BlobData {
+        blob_id,
+        blob: hex::encode(blob),
+    })
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let args: Vec<String> = std::env::args().collect();
+
+    if args.len() != 4 {
+        eprintln!("Usage: cargo run <validator_timelock_address> <rpc_url> <block_start>");
+        std::process::exit(1);
+    }
+
+    let validator_timelock_address = Address::from_str(&args[1])?;
+
+    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
+
+    let url = alloy::transports::http::reqwest::Url::from_str(&args[2])?;
+    let provider: RootProvider<
+        alloy::transports::http::Http<alloy::transports::http::Client>,
+        Ethereum,
+    > = RootProvider::new_http(url);
+
+    let block_start = args[3].parse::<u64>()?;
+
+    get_transactions(&provider, validator_timelock_address, block_start).await?;
+
+    Ok(())
+}
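The tool writes its output as a JSON array of `{ blob_id, blob }` objects with hex-encoded payloads, so consumers need one decode pass to get raw bytes back. A minimal sketch of reading `blob_data.json` from another program; `read_blobs` and the local `BlobData` mirror are hypothetical names that simply match the serialized shape:

```rust
// Sketch: load blob_data.json and recover (blob_id, raw bytes) pairs.
use serde::Deserialize;

#[derive(Deserialize)]
struct BlobData {
    blob_id: String,
    blob: String, // hex-encoded blob payload
}

fn read_blobs(path: &str) -> anyhow::Result<Vec<(String, Vec<u8>)>> {
    let json = std::fs::read_to_string(path)?;
    let blobs: Vec<BlobData> = serde_json::from_str(&json)?;
    blobs
        .into_iter()
        .map(|b| Ok((b.blob_id, hex::decode(&b.blob)?)))
        .collect()
}
```

Invocation follows the usage string above: `cargo run <validator_timelock_address> <rpc_url> <block_start>` scans from `block_start` to the latest block and writes `blob_data.json` in the working directory.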