diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index bcade948d79..f719360c6a2 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -41,7 +41,7 @@ jobs: sudo add-apt-repository ppa:rmescandon/yq echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install -y kurtosis-cli yq + sudo apt install -y kurtosis-cli=1.3.1 yq kurtosis analytics disable - name: Download Docker image artifact @@ -88,7 +88,7 @@ jobs: sudo add-apt-repository ppa:rmescandon/yq echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install -y kurtosis-cli yq + sudo apt install -y kurtosis-cli=1.3.1 yq kurtosis analytics disable - name: Download Docker image artifact @@ -124,7 +124,7 @@ jobs: sudo add-apt-repository ppa:rmescandon/yq echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install -y kurtosis-cli yq + sudo apt install -y kurtosis-cli=1.3.1 yq kurtosis analytics disable - name: Download Docker image artifact diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 769b889de4d..a80470cf167 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -54,6 +54,20 @@ jobs: done echo "skip_ci=$SKIP_CI" >> $GITHUB_OUTPUT + lockbud: + name: lockbud + runs-on: ubuntu-latest + container: + image: sigmaprime/lockbud:latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install dependencies + run: apt update && apt install -y cmake + - name: Generate code coverage + run: | + cargo lockbud -k deadlock -b -l tokio_util + target-branch-check: name: target-branch-check runs-on: ubuntu-latest @@ -173,8 +187,19 @@ jobs: channel: stable cache-target: release bins: cargo-nextest + - name: 
Create CI logger dir + run: mkdir ${{ runner.temp }}/network_test_logs - name: Run network tests for all known forks run: make test-network + env: + TEST_FEATURES: portable,ci_logger + CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs + - name: Upload logs + uses: actions/upload-artifact@v4 + with: + name: network_test_logs + path: ${{ runner.temp }}/network_test_logs + slasher-tests: name: slasher-tests needs: [check-labels] @@ -395,7 +420,7 @@ jobs: channel: stable cache-target: release - name: Run Makefile to trigger the bash script - run: make cli + run: make cli-local # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. test-suite-success: @@ -422,6 +447,7 @@ jobs: 'cargo-udeps', 'compile-with-beta-compiler', 'cli-check', + 'lockbud', ] steps: - uses: actions/checkout@v4 diff --git a/Cargo.lock b/Cargo.lock index 87ede2e545e..b2fef59e9b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -59,19 +59,13 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -161,9 +155,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4177d135789e282e925092be8939d421b701c6d92c0a16679faa659d9166289d" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" 
dependencies = [ "alloy-eips", "alloy-primitives", @@ -193,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "499ee14d296a133d142efd215eb36bf96124829fe91cf8f5d4e5ccdd381eae00" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -210,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a767e59c86900dd7c3ce3ecef04f3ace5ac9631ee150beb8b7d22f7fa3bbb2d7" +checksum = "411aff151f2a73124ee473708e82ed51b2535f68928b6a1caa8bc1246ae6f7cd" dependencies = [ "alloy-rlp", "arbitrary", @@ -220,11 +214,11 @@ dependencies = [ "cfg-if", "const-hex", "derive_arbitrary", - "derive_more 0.99.18", + "derive_more 1.0.0", "getrandom", "hex-literal", "itoa", - "k256 0.13.3", + "k256 0.13.4", "keccak-asm", "proptest", "proptest-derive", @@ -328,9 +322,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -488,9 +482,9 @@ checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -577,7 +571,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.35", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -676,9 
+670,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ "async-trait", "axum-core", @@ -702,7 +696,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -710,9 +704,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ "async-trait", "bytes", @@ -723,7 +717,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -731,17 +725,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -810,12 +804,12 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "maplit", "merkle_proof", + "metrics", "oneshot_broadcast", "operation_pool", "parking_lot 0.12.3", @@ -886,9 +880,9 @@ dependencies = [ "fnv", "futures", "itertools 0.10.5", - "lighthouse_metrics", "lighthouse_network", "logging", + 
"metrics", "num_cpus", "parking_lot 0.12.3", "serde", @@ -1141,9 +1135,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1224,9 +1218,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.15" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1350,9 +1344,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -1360,9 +1354,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", @@ -1373,9 +1367,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ 
"heck 0.5.0", "proc-macro2", @@ -1425,8 +1419,8 @@ dependencies = [ "http_api", "http_metrics", "kzg", - "lighthouse_metrics", "lighthouse_network", + "metrics", "monitoring_api", "network", "operation_pool", @@ -1555,9 +1549,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -2075,13 +2069,14 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.77", + "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.3" +version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e13bab2796f412722112327f3e575601a3e9cdcbe426f0d30dbf43f3f5dc71" +checksum = "158fe8e2e68695bd615d7e4f3227c0727b151330d3e253b525086c348d055d5e" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -2413,7 +2408,7 @@ dependencies = [ "bytes", "ed25519-dalek", "hex", - "k256 0.13.3", + "k256 0.13.4", "log", "rand", "serde", @@ -2423,11 +2418,11 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.77", @@ -2523,9 +2518,9 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "lighthouse_metrics", "logging", "merkle_proof", + "metrics", "parking_lot 0.12.3", "sensitive_url", "serde", @@ -3023,10 +3018,10 @@ dependencies = [ "jsonwebtoken", "keccak-hash", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", + "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", "rand", @@ -3034,6 +3029,7 @@ 
dependencies = [ "sensitive_url", "serde", "serde_json", + "sha2 0.9.9", "slog", "slot_clock", "ssz_types", @@ -3192,7 +3188,7 @@ checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -3223,7 +3219,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", - "lighthouse_metrics", + "metrics", "proto_array", "slog", "state_processing", @@ -3350,7 +3346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", ] @@ -3467,9 +3463,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "git-version" @@ -3915,11 +3911,11 @@ dependencies = [ "futures", "genesis", "hex", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "lru", + "metrics", "network", "operation_pool", "parking_lot 0.12.3", @@ -3949,11 +3945,11 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "reqwest", "serde", "slog", @@ -4056,9 +4052,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-util", @@ -4067,13 +4063,15 @@ dependencies = [ "hyper 1.4.1", 
"pin-project-lite", "tokio", + "tower 0.4.13", + "tower-service", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4312,9 +4310,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is-terminal" @@ -4414,9 +4412,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 0.16.9", @@ -4437,9 +4435,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4870,7 +4868,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -4942,7 +4940,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5080,11 +5078,11 @@ dependencies = [ "eth2_network_config", "ethereum_hashing", "futures", - "lighthouse_metrics", 
"lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "sensitive_url", "serde", "serde_json", @@ -5101,13 +5099,6 @@ dependencies = [ "validator_manager", ] -[[package]] -name = "lighthouse_metrics" -version = "0.2.0" -dependencies = [ - "prometheus", -] - [[package]] name = "lighthouse_network" version = "0.2.0" @@ -5131,11 +5122,11 @@ dependencies = [ "itertools 0.10.5", "libp2p", "libp2p-mplex", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "lru_cache", + "metrics", "parking_lot 0.12.3", "prometheus-client", "quickcheck", @@ -5241,7 +5232,7 @@ name = "logging" version = "0.2.0" dependencies = [ "chrono", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "serde", "serde_json", @@ -5297,7 +5288,7 @@ name = "malloc_utils" version = "0.1.0" dependencies = [ "libc", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "tikv-jemalloc-ctl", "tikv-jemallocator", @@ -5413,6 +5404,13 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "metrics" +version = "0.2.0" +dependencies = [ + "prometheus", +] + [[package]] name = "migrations_internals" version = "2.2.0" @@ -5479,15 +5477,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -5544,8 +5533,8 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", - "lighthouse_metrics", "lighthouse_version", + "metrics", "regex", "reqwest", "sensitive_url", @@ -5710,6 +5699,7 @@ dependencies = [ "async-channel", "beacon_chain", "beacon_processor", + "bls", "delay_map", "derivative", "error-chain", @@ -5725,11 +5715,11 @@ dependencies = [ "igd-next", "itertools 
0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_network", "logging", "lru_cache", "matches", + "metrics", "operation_pool", "parking_lot 0.12.3", "rand", @@ -5999,9 +5989,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.3.1+3.3.1" +version = "300.3.2+3.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" +checksum = "a211a18d945ef7e648cc6e0058f4c548ee46aab922ea203e0d30e966ea23647b" dependencies = [ "cc", ] @@ -6029,8 +6019,8 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", - "lighthouse_metrics", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -6122,9 +6112,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -6169,7 +6159,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -6239,9 +6229,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -6342,9 +6332,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -6355,15 +6345,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] @@ -6378,7 +6368,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.35", + "rustix 0.38.37", "tracing", "windows-sys 0.59.0", ] @@ -6426,9 +6416,9 @@ dependencies = [ [[package]] name = "postgres-types" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02048d9e032fb3cc3413bbf7b83a15d84a5d419778e2628751896d856498eee9" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" dependencies = [ "bytes", "fallible-iterator", @@ -6452,9 +6442,9 @@ dependencies = [ [[package]] name = "pq-sys" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24ff9e4cf6945c988f0db7005d87747bf72864965c3529d259ad155ac41d584" +checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" dependencies = [ "vcpkg", ] @@ -6519,7 +6509,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -6704,9 +6694,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d2fb862b7ba45e615c1429def928f2e15f815bdf933b27a2d3824e224c1f46" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "futures-io", @@ -6714,7 +6704,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -6723,15 +6713,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a9b3a42929fad8a7c3de7f86ce0814cfa893328157672680e9fb1145549c5" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -6740,15 +6730,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6866,9 +6856,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58323dc32ea52a8ae105ff94bc0460c5d906307533ba3401aa63db3cbe491fe5" +checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" dependencies = [ "libc", ] @@ -6884,18 +6874,9 @@ 
dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -7249,9 +7230,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -7281,21 +7262,21 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -7337,9 +7318,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7429,11 +7410,11 @@ dependencies = [ 
[[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7522,9 +7503,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -7573,9 +7554,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -7592,9 +7573,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -7603,9 +7584,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -7749,9 +7730,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -7865,12 +7846,12 @@ dependencies = [ "filesystem", "flate2", "libmdbx", - "lighthouse_metrics", "lmdb-rkv", "lmdb-rkv-sys", "logging", "lru", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -8026,7 +8007,7 @@ dependencies = [ name = "slot_clock" version = "0.2.0" dependencies = [ - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "types", ] @@ -8154,8 +8135,8 @@ dependencies = [ "int_to_bytes", "integer-sqrt", "itertools 0.10.5", - "lighthouse_metrics", "merkle_proof", + "metrics", "rand", "rayon", "safe_arith", @@ -8195,8 +8176,8 @@ dependencies = [ "ethereum_ssz_derive", "itertools 0.10.5", "leveldb", - "lighthouse_metrics", "lru", + "metrics", "parking_lot 0.12.3", "safe_arith", "serde", @@ -8406,11 +8387,12 @@ version = "0.1.0" dependencies = [ "async-channel", "futures", - "lighthouse_metrics", "logging", + "metrics", "slog", "sloggers", "tokio", + "tracing", ] [[package]] @@ -8422,7 +8404,7 @@ dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.35", + "rustix 0.38.37", "windows-sys 0.59.0", ] @@ -8452,7 +8434,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.35", + "rustix 0.38.37", "windows-sys 0.48.0", ] @@ -8491,18 +8473,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" 
+version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -8705,9 +8687,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03adcf0147e203b6032c0b2d30be1415ba03bc348901f3ff1cc0df6a733e60c3" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" dependencies = [ "async-trait", "byteorder", @@ -8752,9 +8734,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -8764,9 +8746,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -8795,7 +8777,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -8820,9 +8802,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -8844,6 +8826,21 @@ dependencies = [ 
"tokio", "tower-layer", "tower-service", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", "tracing", ] @@ -9117,15 +9114,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -9138,9 +9135,9 @@ checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -9247,11 +9244,11 @@ dependencies = [ "hyper 1.4.1", "itertools 0.10.5", "libsecp256k1", - "lighthouse_metrics", "lighthouse_version", "lockfile", "logging", "malloc_utils", + "metrics", "mockito", "monitoring_api", "node_test_rig", @@ -9307,6 +9304,7 @@ dependencies = [ "account_utils", "clap", "clap_utils", + "derivative", "environment", "eth2", "eth2_network_config", @@ -9418,7 +9416,7 @@ dependencies = [ "bytes", "eth2", 
"headers", - "lighthouse_metrics", + "metrics", "safe_arith", "serde", "serde_array_query", @@ -9624,11 +9622,11 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "whoami" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.4.1", + "redox_syscall 0.5.4", "wasite", "web-sys", ] @@ -10023,9 +10021,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" diff --git a/Cargo.toml b/Cargo.toml index b5349134523..3ced362feab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/lighthouse_metrics", + "common/metrics", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -95,7 +95,7 @@ resolver = "2" edition = "2021" [workspace.dependencies] -alloy-primitives = "0.8" +alloy-primitives = { version = "0.8", features = ["rlp", "getrandom"] } alloy-rlp = "0.3.4" alloy-consensus = "0.3.0" anyhow = "1" @@ -142,6 +142,7 @@ mockito = "1.5.0" num_cpus = "1" parking_lot = "0.12" paste = "1" +prometheus = "0.13" quickcheck = "1" quickcheck_macros = "1" quote = "1" @@ -214,7 +215,7 @@ gossipsub = { path = "beacon_node/lighthouse_network/gossipsub/" } http_api = { path = "beacon_node/http_api" } int_to_bytes = { path = "consensus/int_to_bytes" } kzg = { path = "crypto/kzg" } -lighthouse_metrics = { path = "common/lighthouse_metrics" } +metrics = { path = "common/metrics" } 
lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } lockfile = { path = "common/lockfile" } diff --git a/Makefile b/Makefile index e6420a4c984..fd7d45f26a0 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ test-exec-engine: # test vectors. test: test-release -# Updates the CLI help text pages in the Lighthouse book, building with Docker. +# Updates the CLI help text pages in the Lighthouse book, building with Docker (primarily for Windows users). cli: docker run --rm --user=root \ -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ @@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 0dc941df90c..b0fa0131808 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -42,7 +42,7 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } lru = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 491271d6a9e..9ee0b01df36 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ 
b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1144,13 +1144,14 @@ pub fn verify_propagation_slot_range( let current_fork = spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); - let earliest_permissible_slot = if !current_fork.deneb_enabled() { - one_epoch_prior - // EIP-7045 - } else { + + let earliest_permissible_slot = if current_fork.deneb_enabled() { + // EIP-7045 one_epoch_prior .epoch(E::slots_per_epoch()) .start_slot(E::slots_per_epoch()) + } else { + one_epoch_prior }; if attestation_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 198d7d61f09..b76dba88fd0 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,5 +1,5 @@ use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; -use execution_layer::{ExecutionLayer, ExecutionPayloadBody}; +use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; use std::sync::Arc; @@ -57,7 +57,7 @@ struct BodiesByRange { struct BlockParts { blinded_block: Box>, header: Box>, - body: Option>>, + body: Option>>, } impl BlockParts { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d287e2b68a..90a203f722c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,7 +22,7 @@ pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, - DataColumnsToPublish, + DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use 
crate::early_attester_cache::EarlyAttesterCache; @@ -34,7 +34,6 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; -use crate::historical_blocks::HistoricalBlockError; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, }; @@ -755,12 +754,10 @@ impl BeaconChain { ) -> Result> + '_, Error> { let oldest_block_slot = self.store.get_oldest_block_slot(); if start_slot < oldest_block_slot { - return Err(Error::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot: start_slot, - oldest_block_slot, - }, - )); + return Err(Error::HistoricalBlockOutOfRange { + slot: start_slot, + oldest_block_slot, + }); } let local_head = self.head_snapshot(); @@ -785,12 +782,10 @@ impl BeaconChain { ) -> Result> + '_, Error> { let oldest_block_slot = self.store.get_oldest_block_slot(); if start_slot < oldest_block_slot { - return Err(Error::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot: start_slot, - oldest_block_slot, - }, - )); + return Err(Error::HistoricalBlockOutOfRange { + slot: start_slot, + oldest_block_slot, + }); } self.with_head(move |head| { @@ -991,7 +986,7 @@ impl BeaconChain { WhenSlotSkipped::Prev => self.block_root_at_slot_skips_prev(request_slot), } .or_else(|e| match e { - Error::HistoricalBlockError(_) => Ok(None), + Error::HistoricalBlockOutOfRange { .. } => Ok(None), e => Err(e), }) } @@ -2619,11 +2614,7 @@ impl BeaconChain { /// Check if the current slot is greater than or equal to the Capella fork epoch. 
pub fn current_slot_is_post_capella(&self) -> Result { let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = current_fork { - Ok(false) - } else { - Ok(true) - } + Ok(current_fork.capella_enabled()) } /// Import a BLS to execution change to the op pool. @@ -3019,13 +3010,7 @@ impl BeaconChain { self: &Arc, data_columns: Vec>, publish_fn: impl FnOnce() -> Result<(), BlockError>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { let Ok((slot, block_root)) = data_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3055,7 +3040,7 @@ impl BeaconChain { publish_fn, ) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) } /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was @@ -3114,13 +3099,7 @@ impl BeaconChain { pub async fn process_rpc_custody_columns( self: &Arc, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { let Ok((slot, block_root)) = custody_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3158,7 +3137,67 @@ impl BeaconChain { let r = self .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) + } + + pub async fn reconstruct_data_columns( + self: &Arc, + block_root: Hash256, + ) -> Result< + Option<( + AvailabilityProcessingStatus, + DataColumnSidecarList, + )>, + BlockError, + > { + // As of now we only reconstruct data columns on supernodes, so if the block is already + // available on a supernode, there's no need to reconstruct as the node must already have + // all columns. 
+ if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Ok(None); + } + + let data_availability_checker = self.data_availability_checker.clone(); + + let result = self + .task_executor + .spawn_blocking_handle( + move || data_availability_checker.reconstruct_data_columns(&block_root), + "reconstruct_data_columns", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)??; + + match result { + DataColumnReconstructionResult::Success((availability, data_columns_to_publish)) => { + let Some(slot) = data_columns_to_publish.first().map(|d| d.slot()) else { + // This should be unreachable because empty result would return `RecoveredColumnsNotImported` instead of success. + return Ok(None); + }; + + let r = self + .process_availability(slot, availability, || Ok(())) + .await; + self.remove_notified(&block_root, r) + .map(|availability_processing_status| { + Some((availability_processing_status, data_columns_to_publish)) + }) + } + DataColumnReconstructionResult::NotStarted(reason) + | DataColumnReconstructionResult::RecoveredColumnsNotImported(reason) => { + // We use metric here because logging this would be *very* noisy. + metrics::inc_counter_vec( + &metrics::KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL, + &[reason], + ); + Ok(None) + } + } } /// Remove any block components from the *processing cache* if we no longer require them. If the @@ -3176,23 +3215,6 @@ impl BeaconChain { r } - /// Remove any block components from the *processing cache* if we no longer require them. If the - /// block was imported full or erred, we no longer require them. - fn remove_notified_custody_columns

( - &self, - block_root: &Hash256, - r: Result<(AvailabilityProcessingStatus, P), BlockError>, - ) -> Result<(AvailabilityProcessingStatus, P), BlockError> { - let has_missing_components = matches!( - r, - Ok((AvailabilityProcessingStatus::MissingComponents(_, _), _)) - ); - if !has_missing_components { - self.reqresp_pre_import_cache.write().remove(block_root); - } - r - } - /// Wraps `process_block` in logic to cache the block's commitments in the processing cache /// and evict if the block was imported or errored. pub async fn process_block_with_early_caching>( @@ -3448,26 +3470,21 @@ impl BeaconChain { block_root: Hash256, data_columns: Vec>, publish_fn: impl FnOnce() -> Result<(), BlockError>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { if let Some(slasher) = self.slasher.as_ref() { for data_colum in &data_columns { slasher.accept_block_header(data_colum.signed_block_header()); } } - let (availability, data_columns_to_publish) = self - .data_availability_checker - .put_gossip_data_columns(slot, block_root, data_columns)?; + let availability = self.data_availability_checker.put_gossip_data_columns( + slot, + block_root, + data_columns, + )?; self.process_availability(slot, availability, publish_fn) .await - .map(|result| (result, data_columns_to_publish)) } /// Checks if the provided blobs can make any cached blocks available, and imports immediately @@ -3517,13 +3534,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { // Need to scope this to ensure the lock is dropped before calling `process_availability` // Even an explicit drop is not enough to convince the borrow checker. 
{ @@ -3548,16 +3559,14 @@ impl BeaconChain { // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. - let (availability, data_columns_to_publish) = - self.data_availability_checker.put_rpc_custody_columns( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - custody_columns, - )?; + let availability = self.data_availability_checker.put_rpc_custody_columns( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + custody_columns, + )?; self.process_availability(slot, availability, || Ok(())) .await - .map(|result| (result, data_columns_to_publish)) } /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` @@ -3855,6 +3864,8 @@ impl BeaconChain { } if let Some(data_columns) = data_columns { + // TODO(das): `available_block includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec. if !data_columns.is_empty() { debug!( self.log, "Writing data_columns to store"; @@ -5555,10 +5566,15 @@ impl BeaconChain { ) } BeaconState::Deneb(_) => { - let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = - block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + _maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); ( BeaconBlock::Deneb(BeaconBlockDeneb { @@ -5593,10 +5609,15 @@ impl BeaconChain { ) } BeaconState::Electra(_) => { - let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = - block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? 
- .deconstruct(); + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); ( BeaconBlock::Electra(BeaconBlockElectra { @@ -5621,6 +5642,8 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, + execution_requests: maybe_requests + .ok_or(BlockProductionError::MissingExecutionRequests)?, }, }), maybe_blobs_and_proofs, @@ -5945,26 +5968,23 @@ impl BeaconChain { payload_attributes } else { let prepare_slot_fork = self.spec.fork_name_at_slot::(prepare_slot); - let withdrawals = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, - ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - let chain = self.clone(); - self.spawn_blocking_handle( - move || { - chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot) - }, - "prepare_beacon_proposer_withdrawals", - ) - .await? - .map(Some)? - } + + let withdrawals = if prepare_slot_fork.capella_enabled() { + let chain = self.clone(); + self.spawn_blocking_handle( + move || chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot), + "prepare_beacon_proposer_withdrawals", + ) + .await? + .map(Some)? + } else { + None }; - let parent_beacon_block_root = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => None, - ForkName::Deneb | ForkName::Electra => { - Some(pre_payload_attributes.parent_beacon_block_root) - } + let parent_beacon_block_root = if prepare_slot_fork.deneb_enabled() { + Some(pre_payload_attributes.parent_beacon_block_root) + } else { + None }; let payload_attributes = PayloadAttributes::new( @@ -6110,27 +6130,27 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. 
let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - let (head_block_root, head_hash, justified_hash, finalized_hash) = if let Some(head_hash) = - params.head_hash - { - ( - params.head_root, - head_hash, - params - .justified_hash - .unwrap_or_else(ExecutionBlockHash::zero), - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. - ForkName::Base | ForkName::Altair => return Ok(()), - _ => { + let (head_block_root, head_hash, justified_hash, finalized_hash) = + if let Some(head_hash) = params.head_hash { + ( + params.head_root, + head_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. + if self + .spec + .fork_name_at_slot::(next_slot) + .bellatrix_enabled() + { // We are post-bellatrix if let Some(payload_attributes) = execution_layer .payload_attributes(next_slot, params.head_root) @@ -6164,9 +6184,10 @@ impl BeaconChain { // We are not a proposer, no need to update the EL. 
return Ok(()); } + } else { + return Ok(()); } - } - }; + }; let forkchoice_updated_response = execution_layer .notify_forkchoice_updated( @@ -7009,7 +7030,6 @@ impl BeaconChain { .finalized_checkpoint() .epoch .sync_committee_period(&self.spec)?; - self.light_client_server_cache.get_light_client_bootstrap( &self.store, block_root, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a8233f170f6..527462ab64c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -55,8 +55,8 @@ use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlo use crate::data_column_verification::GossipDataColumnError; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ - is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, + validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, + NotifyExecutionLayer, PayloadNotifier, }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; @@ -70,11 +70,11 @@ use derivative::Derivative; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; -use lighthouse_metrics::TryExt; +use metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, warn, Logger}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -95,9 +95,9 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, - BlobsList, ChainSpec, 
DataColumnSidecarList, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, + Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -1388,28 +1388,6 @@ impl ExecutionPendingBlock { } let payload_verification_status = payload_notifier.notify_new_payload().await?; - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.block_hash()); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? 
- { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - Ok(PayloadVerificationOutcome { payload_verification_status, is_valid_merge_transition_block, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 001dbf00808..5f1e94fc8c6 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -984,6 +984,7 @@ where store, self.import_all_data_columns, self.spec, + log.new(o!("service" => "data_availability_checker")), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4d5afdc8904..047764d705c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -2,10 +2,12 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; -use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner; -use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; +use crate::data_availability_checker::overflow_lru_cache::{ + DataAvailabilityCheckerInner, ReconstructColumnsDecision, +}; +use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slog::{debug, error}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -27,11 +29,12 @@ use crate::data_column_verification::{ verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, }; +use crate::metrics::{ + 
KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, +}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; -pub use self::overflow_lru_cache::DataColumnsToPublish; - /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this @@ -71,6 +74,16 @@ pub struct DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Arc, spec: Arc, + log: Logger, +} + +pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); + +#[derive(Debug)] +pub enum DataColumnReconstructionResult { + Success(AvailabilityAndReconstructedColumns), + NotStarted(&'static str), + RecoveredColumnsNotImported(&'static str), } /// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. @@ -101,6 +114,7 @@ impl DataAvailabilityChecker { store: BeaconStore, import_all_data_columns: bool, spec: Arc, + log: Logger, ) -> Result { let custody_subnet_count = if import_all_data_columns { spec.data_column_sidecar_subnet_count as usize @@ -108,13 +122,15 @@ impl DataAvailabilityChecker { spec.custody_requirement as usize }; - let custody_column_count = - custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); + let subnet_sampling_size = + std::cmp::max(custody_subnet_count, spec.samples_per_slot as usize); + let sampling_column_count = + subnet_sampling_size.saturating_mul(spec.data_columns_per_subnet()); let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, - custody_column_count, + sampling_column_count, spec.clone(), )?; Ok(Self { @@ -122,13 +138,12 @@ impl DataAvailabilityChecker { slot_clock, kzg, spec, + log, }) } - pub fn get_custody_columns_count(&self) -> usize { - self.availability_cache - .custody_subnet_count() - 
.saturating_mul(self.spec.data_columns_per_subnet()) + pub fn get_sampling_column_count(&self) -> usize { + self.availability_cache.sampling_column_count() } /// Checks if the block root is currenlty in the availability cache awaiting import because @@ -141,9 +156,9 @@ impl DataAvailabilityChecker { .get_execution_valid_block(block_root) } - /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block + /// Return the set of cached blob indexes for `block_root`. Returns None if there is no block /// component for `block_root`. - pub fn imported_blob_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_blob_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| { @@ -156,9 +171,9 @@ impl DataAvailabilityChecker { }) } - /// Return the set of imported custody column indexes for `block_root`. Returns None if there is + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is /// no block component for `block_root`. - pub fn imported_custody_column_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| components.get_cached_data_columns_indices()) @@ -205,7 +220,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, epoch, verified_blobs) + .put_kzg_verified_blobs(block_root, epoch, verified_blobs, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. 
This performs KZG @@ -216,8 +231,7 @@ impl DataAvailabilityChecker { block_root: Hash256, epoch: Epoch, custody_columns: DataColumnSidecarList, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + ) -> Result, AvailabilityCheckError> { // TODO(das): report which column is invalid for proper peer scoring // TODO(das): batch KZG verification here, but fallback into checking each column // individually to report which column(s) are invalid. @@ -233,10 +247,10 @@ impl DataAvailabilityChecker { .collect::, AvailabilityCheckError>>()?; self.availability_cache.put_kzg_verified_data_columns( - &self.kzg, block_root, epoch, verified_custody_columns, + &self.log, ) } @@ -253,6 +267,7 @@ impl DataAvailabilityChecker { gossip_blob.block_root(), gossip_blob.epoch(), vec![gossip_blob.into_inner()], + &self.log, ) } @@ -267,8 +282,7 @@ impl DataAvailabilityChecker { slot: Slot, block_root: Hash256, gossip_data_columns: Vec>, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + ) -> Result, AvailabilityCheckError> { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let custody_columns = gossip_data_columns @@ -277,10 +291,10 @@ impl DataAvailabilityChecker { .collect::>(); self.availability_cache.put_kzg_verified_data_columns( - &self.kzg, block_root, epoch, custody_columns, + &self.log, ) } @@ -291,7 +305,7 @@ impl DataAvailabilityChecker { executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_pending_executed_block(executed_block) + .put_pending_executed_block(executed_block, &self.log) } pub fn remove_pending_components(&self, block_root: Hash256) { @@ -511,6 +525,92 @@ impl DataAvailabilityChecker { block_cache_size: self.availability_cache.block_cache_size(), } } + + pub fn reconstruct_data_columns( + &self, + block_root: &Hash256, + ) -> Result, AvailabilityCheckError> { + let pending_components = match self + .availability_cache + 
.check_and_set_reconstruction_started(block_root) + { + ReconstructColumnsDecision::Yes(pending_components) => pending_components, + ReconstructColumnsDecision::No(reason) => { + return Ok(DataColumnReconstructionResult::NotStarted(reason)); + } + }; + + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS); + let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); + + let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( + &self.kzg, + &pending_components.verified_data_columns, + &self.spec, + ) + .map_err(|e| { + error!( + self.log, + "Error reconstructing data columns"; + "block_root" => ?block_root, + "error" => ?e + ); + self.availability_cache + .handle_reconstruction_failure(block_root); + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES); + AvailabilityCheckError::ReconstructColumnsError(e) + })?; + + // Check indices from cache again to make sure we don't publish components we've already received. + let Some(existing_column_indices) = self.cached_data_column_indexes(block_root) else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "block already imported", + )); + }; + + let data_columns_to_publish = all_data_columns + .into_iter() + .filter(|d| !existing_column_indices.contains(&d.index())) + .collect::>(); + + let Some(slot) = data_columns_to_publish + .first() + .map(|d| d.as_data_column().slot()) + else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "No new columns to import and publish", + )); + }; + + metrics::stop_timer(timer); + metrics::inc_counter_by( + &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, + data_columns_to_publish.len() as u64, + ); + + debug!(self.log, "Reconstructed columns"; + "count" => data_columns_to_publish.len(), + "block_root" => ?block_root, + "slot" => slot, + ); + + self.availability_cache + .put_kzg_verified_data_columns( + *block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + 
data_columns_to_publish.clone(), + &self.log, + ) + .map(|availability| { + DataColumnReconstructionResult::Success(( + availability, + data_columns_to_publish + .into_iter() + .map(|d| d.clone_arc()) + .collect::>(), + )) + }) + } } /// Helper struct to group data availability checker metrics. diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 46ab08a8215..6d4636e8ed8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -6,23 +6,19 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; -use crate::metrics; use crate::BeaconChainTypes; -use kzg::Kzg; use lru::LruCache; use parking_lot::RwLock; +use slog::{debug, Logger}; use ssz_types::{FixedVector, VariableList}; -use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, SignedBeaconBlock, }; -pub type DataColumnsToPublish = Option>; - /// This represents the components of a partially available block /// /// The blobs are all gossip and kzg verified. @@ -40,7 +36,7 @@ pub struct PendingComponents { pub enum BlockImportRequirement { AllBlobs, - CustodyColumns(usize), + ColumnSampling(usize), } impl PendingComponents { @@ -95,7 +91,7 @@ impl PendingComponents { /// block. /// /// This corresponds to the number of commitments that are present in a block. 
- pub fn num_expected_blobs(&self) -> Option { + pub fn block_kzg_commitments_count(&self) -> Option { self.get_cached_block() .as_ref() .map(|b| b.get_commitments().len()) @@ -203,21 +199,61 @@ impl PendingComponents { /// /// Returns `true` if both the block exists and the number of received blobs / custody columns /// matches the number of expected blobs / custody columns. - pub fn is_available(&self, block_import_requirement: &BlockImportRequirement) -> bool { + pub fn is_available( + &self, + block_import_requirement: &BlockImportRequirement, + log: &Logger, + ) -> bool { + let block_kzg_commitments_count_opt = self.block_kzg_commitments_count(); + match block_import_requirement { - BlockImportRequirement::AllBlobs => self - .num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == self.num_received_blobs() - }), - BlockImportRequirement::CustodyColumns(num_expected_columns) => { - let num_received_data_columns = self.num_received_data_columns(); + BlockImportRequirement::AllBlobs => { + let received_blobs = self.num_received_blobs(); + let expected_blobs_msg = block_kzg_commitments_count_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_blobs" => received_blobs, + "expected_blobs" => expected_blobs_msg, + ); + + block_kzg_commitments_count_opt.map_or(false, |num_expected_blobs| { + num_expected_blobs == received_blobs + }) + } + BlockImportRequirement::ColumnSampling(num_expected_columns) => { // No data columns when there are 0 blobs - self.num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == 0 - || *num_expected_columns == num_received_data_columns - }) + let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { + if blob_count > 0 { + *num_expected_columns + } else { 
+ 0 + } + }); + + let expected_columns_msg = expected_columns_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + let num_received_columns = self.num_received_data_columns(); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_columns" => num_received_columns, + "expected_columns" => expected_columns_msg, + ); + + expected_columns_opt.map_or(false, |num_expected_columns| { + num_expected_columns == num_received_columns + }) } } } @@ -281,7 +317,7 @@ impl PendingComponents { }; (Some(VariableList::new(verified_blobs)?), None) } - BlockImportRequirement::CustodyColumns(_) => { + BlockImportRequirement::ColumnSampling(_) => { let verified_data_columns = verified_data_columns .into_iter() .map(|d| d.into_inner()) @@ -311,10 +347,6 @@ impl PendingComponents { ))) } - pub fn reconstruction_started(&mut self) { - self.reconstruction_started = true; - } - /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. pub fn epoch(&self) -> Option { self.executed_block @@ -353,28 +385,37 @@ pub struct DataAvailabilityCheckerInner { /// This cache holds a limited number of states in memory and reconstructs them /// from disk when necessary. This is necessary until we merge tree-states state_cache: StateLRUCache, - /// The number of data columns the node is custodying. - custody_column_count: usize, + /// The number of data columns the node is sampling via subnet sampling. + sampling_column_count: usize, spec: Arc, } +// This enum is only used internally within the crate in the reconstruction function to improve +// readability, so it's OK to not box the variant value, and it shouldn't impact memory much with +// the current usage, as it's deconstructed immediately. 
+#[allow(clippy::large_enum_variant)] +pub(crate) enum ReconstructColumnsDecision { + Yes(PendingComponents), + No(&'static str), +} + impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, beacon_store: BeaconStore, - custody_column_count: usize, + sampling_column_count: usize, spec: Arc, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), - custody_column_count, + sampling_column_count, spec, }) } - pub fn custody_subnet_count(&self) -> usize { - self.custody_column_count + pub fn sampling_column_count(&self) -> usize { + self.sampling_column_count } /// Returns true if the block root is known, without altering the LRU ordering @@ -440,41 +481,20 @@ impl DataAvailabilityCheckerInner { ) -> Result { let peer_das_enabled = self.spec.is_peer_das_enabled_for_epoch(epoch); if peer_das_enabled { - Ok(BlockImportRequirement::CustodyColumns( - self.custody_column_count, + Ok(BlockImportRequirement::ColumnSampling( + self.sampling_column_count, )) } else { Ok(BlockImportRequirement::AllBlobs) } } - /// Potentially trigger reconstruction if: - /// - Our custody requirement is all columns - /// - We >= 50% of columns, but not all columns - fn should_reconstruct( - &self, - block_import_requirement: &BlockImportRequirement, - pending_components: &PendingComponents, - ) -> bool { - let BlockImportRequirement::CustodyColumns(num_expected_columns) = block_import_requirement - else { - return false; - }; - - let num_of_columns = self.spec.number_of_columns; - let has_missing_columns = pending_components.verified_data_columns.len() < num_of_columns; - - has_missing_columns - && !pending_components.reconstruction_started - && *num_expected_columns == num_of_columns - && pending_components.verified_data_columns.len() >= num_of_columns / 2 - } - pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, epoch: Epoch, kzg_verified_blobs: I, + log: &Logger, ) -> Result, 
AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); @@ -496,7 +516,7 @@ impl DataAvailabilityCheckerInner { pending_components.merge_blobs(fixed_blobs); let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -514,12 +534,11 @@ impl DataAvailabilityCheckerInner { I: IntoIterator>, >( &self, - kzg: &Kzg, block_root: Hash256, epoch: Epoch, kzg_verified_data_columns: I, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + log: &Logger, + ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. @@ -533,65 +552,67 @@ impl DataAvailabilityCheckerInner { let block_import_requirement = self.block_import_requirement(epoch)?; - // Potentially trigger reconstruction if: - // - Our custody requirement is all columns - // - We >= 50% of columns - let data_columns_to_publish = - if self.should_reconstruct(&block_import_requirement, &pending_components) { - pending_components.reconstruction_started(); - - let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); - - let existing_column_indices = pending_components - .verified_data_columns - .iter() - .map(|d| d.index()) - .collect::>(); - - // Will only return an error if: - // - < 50% of columns - // - There are duplicates - let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( - kzg, - pending_components.verified_data_columns.as_slice(), - &self.spec, - ) - .map_err(AvailabilityCheckError::ReconstructColumnsError)?; - - let data_columns_to_publish = all_data_columns - .iter() - .filter(|d| !existing_column_indices.contains(&d.index())) - .map(|d| d.clone_arc()) - .collect::>(); - - 
pending_components.verified_data_columns = all_data_columns; - - metrics::stop_timer(timer); - metrics::inc_counter_by( - &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, - data_columns_to_publish.len() as u64, - ); - - Some(data_columns_to_publish) - } else { - None - }; - - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components - .make_available(block_import_requirement, &self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) - .map(|availability| (availability, data_columns_to_publish)) + pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) } else { write_lock.put(block_root, pending_components); - Ok(( - Availability::MissingComponents(block_root), - data_columns_to_publish, - )) + Ok(Availability::MissingComponents(block_root)) + } + } + + /// Check whether data column reconstruction should be attempted. + /// + /// Potentially trigger reconstruction if: + /// - Our custody requirement is all columns (supernode), and we haven't got all columns + /// - We have >= 50% of columns, but not all columns + /// - Reconstruction hasn't been started for the block + /// + /// If reconstruction is required, returns `PendingComponents` which contains the + /// components to be used as inputs to reconstruction, otherwise returns a `reason`. + pub fn check_and_set_reconstruction_started( + &self, + block_root: &Hash256, + ) -> ReconstructColumnsDecision { + let mut write_lock = self.critical.write(); + let Some(pending_components) = write_lock.get_mut(block_root) else { + // Block may have been imported as it does not exist in availability cache. 
+ return ReconstructColumnsDecision::No("block already imported"); + }; + + // If we're sampling all columns, it means we must be custodying all columns. + let custody_column_count = self.sampling_column_count(); + let total_column_count = self.spec.number_of_columns; + let received_column_count = pending_components.verified_data_columns.len(); + + if pending_components.reconstruction_started { + return ReconstructColumnsDecision::No("already started"); + } + if custody_column_count != total_column_count { + return ReconstructColumnsDecision::No("not required for full node"); + } + if received_column_count == self.spec.number_of_columns { + return ReconstructColumnsDecision::No("all columns received"); + } + if received_column_count < total_column_count / 2 { + return ReconstructColumnsDecision::No("not enough columns"); + } + + pending_components.reconstruction_started = true; + ReconstructColumnsDecision::Yes(pending_components.clone()) + } + + /// This could mean some invalid data columns made it through to the `DataAvailabilityChecker`. + /// In this case, we remove all data columns in `PendingComponents`, reset reconstruction + /// status so that we can attempt to retrieve columns from peers again. + pub fn handle_reconstruction_failure(&self, block_root: &Hash256) { + if let Some(pending_components_mut) = self.critical.write().get_mut(block_root) { + pending_components_mut.verified_data_columns = vec![]; + pending_components_mut.reconstruction_started = false; } } @@ -600,6 +621,7 @@ impl DataAvailabilityCheckerInner { pub fn put_pending_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); let block_root = executed_block.import_data.block_root; @@ -621,7 +643,7 @@ impl DataAvailabilityCheckerInner { // Check if we have all components and entire set is consistent. 
let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -919,7 +941,7 @@ mod test { ); assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); if blobs_expected == 0 { assert!( @@ -958,7 +980,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -985,7 +1007,7 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); assert_eq!( availability, @@ -995,7 +1017,7 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); assert!( matches!(availability, Availability::Available(_)), @@ -1063,7 +1085,7 @@ mod test { // put the block in the cache let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); // grab the diet block from the cache for later testing diff 
--git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 1647f190cfa..a4e83b27514 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -127,6 +127,25 @@ pub enum GossipDataColumnError { slot: Slot, index: ColumnIndex, }, + /// Data column index must be between 0 and `NUMBER_OF_COLUMNS` (exclusive). + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InvalidColumnIndex(u64), + /// Data column not expected for a block with empty kzg commitments. + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + UnexpectedDataColumn, + /// The data column length must be equal to the number of commitments/proofs, otherwise the + /// sidecar is invalid. + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InconsistentCommitmentsOrProofLength, } impl From for GossipDataColumnError { @@ -294,10 +313,7 @@ impl KzgVerifiedCustodyDataColumn { kzg: &Kzg, partial_set_of_columns: &[Self], spec: &ChainSpec, - ) -> Result, KzgError> { - // Will only return an error if: - // - < 50% of columns - // - There are duplicates + ) -> Result>, KzgError> { let all_data_columns = reconstruct_data_columns( kzg, &partial_set_of_columns @@ -309,10 +325,8 @@ impl KzgVerifiedCustodyDataColumn { Ok(all_data_columns .into_iter() - .map(|d| { - KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { - data: d, - }) + .map(|data| { + KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { data }) }) .collect::>()) } @@ -367,7 +381,7 @@ pub fn validate_data_column_sidecar_for_gossip( chain: &BeaconChain, ) -> Result, GossipDataColumnError> { let column_slot = data_column.slot(); - + verify_data_column_sidecar(&data_column, &chain.spec)?; verify_index_matches_subnet(&data_column, subnet, 
&chain.spec)?; verify_sidecar_not_from_future_slot(chain, column_slot)?; verify_slot_greater_than_latest_finalized_slot(chain, column_slot)?; @@ -396,6 +410,26 @@ pub fn validate_data_column_sidecar_for_gossip( }) } +/// Verify if the data column sidecar is valid. +fn verify_data_column_sidecar( + data_column: &DataColumnSidecar, + spec: &ChainSpec, +) -> Result<(), GossipDataColumnError> { + if data_column.index >= spec.number_of_columns as u64 { + return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index)); + } + if data_column.kzg_commitments.is_empty() { + return Err(GossipDataColumnError::UnexpectedDataColumn); + } + if data_column.column.len() != data_column.kzg_commitments.len() + || data_column.column.len() != data_column.kzg_proofs.len() + { + return Err(GossipDataColumnError::InconsistentCommitmentsOrProofLength); + } + + Ok(()) +} + // Verify that this is the first column sidecar received for the tuple: // (block_header.slot, block_header.proposer_index, column_sidecar.index) fn verify_is_first_sidecar( @@ -613,3 +647,55 @@ fn verify_sidecar_not_from_future_slot( } Ok(()) } + +#[cfg(test)] +mod test { + use crate::data_column_verification::{ + validate_data_column_sidecar_for_gossip, GossipDataColumnError, + }; + use crate::test_utils::BeaconChainHarness; + use types::{DataColumnSidecar, EthSpec, ForkName, MainnetEthSpec}; + + type E = MainnetEthSpec; + + #[tokio::test] + async fn empty_data_column_sidecars_fails_validation() { + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + harness.advance_slot(); + + let slot = harness.get_current_slot(); + let state = harness.get_current_state(); + let ((block, _blobs_opt), _state) = harness + .make_block_with_modifier(state, slot, |block| { + *block.body_mut().blob_kzg_commitments_mut().unwrap() = 
vec![].into(); + }) + .await; + + let index = 0; + let column_sidecar = DataColumnSidecar:: { + index, + column: vec![].into(), + kzg_commitments: vec![].into(), + kzg_proofs: vec![].into(), + signed_block_header: block.signed_block_header(), + kzg_commitments_inclusion_proof: block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(), + }; + + let result = + validate_data_column_sidecar_for_gossip(column_sidecar.into(), index, &harness.chain); + assert!(matches!( + result.err(), + Some(GossipDataColumnError::UnexpectedDataColumn) + )); + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 8a317ce7549..2a8fd4cd015 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -4,7 +4,6 @@ use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::data_availability_checker::AvailabilityCheckError; use crate::eth1_chain::Error as Eth1ChainError; -use crate::historical_blocks::HistoricalBlockError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; @@ -123,7 +122,11 @@ pub enum BeaconChainError { block_slot: Slot, state_slot: Slot, }, - HistoricalBlockError(HistoricalBlockError), + /// Block is not available (only returned when fetching historic blocks). 
+ HistoricalBlockOutOfRange { + slot: Slot, + oldest_block_slot: Slot, + }, InvalidStateForShuffling { state_epoch: Epoch, shuffling_epoch: Epoch, @@ -245,7 +248,6 @@ easy_from_to!(BlockSignatureVerifierError, BeaconChainError); easy_from_to!(PruningError, BeaconChainError); easy_from_to!(ArithError, BeaconChainError); easy_from_to!(ForkChoiceStoreError, BeaconChainError); -easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(InconsistentFork, BeaconChainError); @@ -294,6 +296,7 @@ pub enum BlockProductionError { InvalidBlockVariant(String), KzgError(kzg::Error), FailedToBuildBlobSidecars(String), + MissingExecutionRequests, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b9b98bfbc00..f2420eea0d2 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -277,9 +277,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } .into()), None => { - if allow_optimistic_import == AllowOptimisticImport::Yes - && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? - { + if allow_optimistic_import == AllowOptimisticImport::Yes { debug!( chain.log, "Optimistically importing merge transition block"; @@ -297,36 +295,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } } -/// Check to see if a block with the given parameters is valid to be imported optimistically. -pub async fn is_optimistic_candidate_block( - chain: &Arc>, - block_slot: Slot, - block_parent_root: Hash256, -) -> Result { - let current_slot = chain.slot()?; - let inner_chain = chain.clone(); - - // Use a blocking task to check if the block is an optimistic candidate. 
Interacting - // with the `fork_choice` lock in an async task can block the core executor. - chain - .spawn_blocking_handle( - move || { - inner_chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_candidate_block( - current_slot, - block_slot, - &block_parent_root, - &inner_chain.spec, - ) - }, - "validate_merge_block_optimistic_candidate", - ) - .await? - .map_err(BeaconChainError::from) -} - /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1372211b175..813eb906b95 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,5 +1,5 @@ use crate::data_availability_checker::AvailableBlock; -use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainTypes}; use itertools::Itertools; use slog::debug; use state_processing::{ @@ -10,7 +10,11 @@ use std::borrow::Cow; use std::iter; use std::time::Duration; use store::metadata::DataColumnInfo; -use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore}; +use store::{ + chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, Error as StoreError, + KeyValueStore, +}; +use strum::IntoStaticStr; use types::{FixedBytesExtended, Hash256, Slot}; /// Use a longer timeout on the pubkey cache. @@ -18,10 +22,8 @@ use types::{FixedBytesExtended, Hash256, Slot}; /// It's ok if historical sync is stalled due to writes from forwards block processing. 
const PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(30); -#[derive(Debug)] +#[derive(Debug, IntoStaticStr)] pub enum HistoricalBlockError { - /// Block is not available (only returned when fetching historic blocks). - BlockOutOfRange { slot: Slot, oldest_block_slot: Slot }, /// Block root mismatch, caller should retry with different blocks. MismatchedBlockRoot { block_root: Hash256, @@ -37,6 +39,14 @@ pub enum HistoricalBlockError { NoAnchorInfo, /// Logic error: should never occur. IndexOutOfBounds, + /// Internal store error + StoreError(StoreError), +} + +impl From for HistoricalBlockError { + fn from(e: StoreError) -> Self { + Self::StoreError(e) + } } impl BeaconChain { @@ -61,7 +71,7 @@ impl BeaconChain { pub fn import_historical_block_batch( &self, mut blocks: Vec>, - ) -> Result { + ) -> Result { let anchor_info = self .store .get_anchor_info() @@ -94,7 +104,9 @@ impl BeaconChain { // Blobs are stored per block, and data columns are each stored individually let n_blob_ops_per_block = if self.spec.is_peer_das_scheduled() { - self.data_availability_checker.get_custody_columns_count() + // TODO(das): `available_block includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec PR. 
+ self.data_availability_checker.get_sampling_column_count() } else { 1 }; @@ -125,8 +137,7 @@ impl BeaconChain { return Err(HistoricalBlockError::MismatchedBlockRoot { block_root, expected_block_root, - } - .into()); + }); } let blinded_block = block.clone_as_blinded(); @@ -210,7 +221,7 @@ impl BeaconChain { let verify_timer = metrics::start_timer(&metrics::BACKFILL_SIGNATURE_VERIFY_TIMES); if !signature_set.verify() { - return Err(HistoricalBlockError::InvalidSignature.into()); + return Err(HistoricalBlockError::InvalidSignature); } drop(verify_timer); drop(sig_timer); diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index ca015d0365a..e0ddd8c8826 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,25 +1,19 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; -use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, Logger}; use ssz::Decode; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; use tree_hash::TreeHash; -use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, - FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, -}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + LightClientUpdate, MerkleProof, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. 
Items in the @@ -69,17 +63,14 @@ impl LightClientServerCache { block_post_state: &mut BeaconState, ) -> Result<(), BeaconChainError> { let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); - + let fork_name = spec.fork_name_at_slot::(block.slot()); // Only post-altair - if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { - return Ok(()); + if fork_name.altair_enabled() { + // Persist in memory cache for a descendent block + let cached_data = LightClientCachedData::from_state(block_post_state)?; + self.prev_block_cache.lock().put(block_root, cached_data); } - // Persist in memory cache for a descendent block - - let cached_data = LightClientCachedData::from_state(block_post_state)?; - self.prev_block_cache.lock().put(block_root, cached_data); - Ok(()) } @@ -413,16 +404,12 @@ impl Default for LightClientServerCache { } } -type FinalityBranch = FixedVector; -type NextSyncCommitteeBranch = FixedVector; -type CurrentSyncCommitteeBranch = FixedVector; - #[derive(Clone)] struct LightClientCachedData { finalized_checkpoint: Checkpoint, - finality_branch: FinalityBranch, - next_sync_committee_branch: NextSyncCommitteeBranch, - current_sync_committee_branch: CurrentSyncCommitteeBranch, + finality_branch: MerkleProof, + next_sync_committee_branch: MerkleProof, + current_sync_committee_branch: MerkleProof, next_sync_committee: Arc>, current_sync_committee: Arc>, finalized_block_root: Hash256, @@ -430,17 +417,18 @@ struct LightClientCachedData { impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { + let (finality_branch, next_sync_committee_branch, current_sync_committee_branch) = ( + state.compute_finalized_root_proof()?, + state.compute_current_sync_committee_proof()?, + state.compute_next_sync_committee_proof()?, + ); Ok(Self { finalized_checkpoint: state.finalized_checkpoint(), - finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + finality_branch, next_sync_committee: 
state.next_sync_committee()?.clone(), current_sync_committee: state.current_sync_committee()?.clone(), - next_sync_committee_branch: state - .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? - .into(), - current_sync_committee_branch: state - .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)? - .into(), + next_sync_committee_branch, + current_sync_committee_branch, finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index f15b46fc4bf..f73775d678f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -2,7 +2,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use bls::FixedBytesExtended; -pub use lighthouse_metrics::*; +pub use metrics::*; use slot_clock::SlotClock; use std::sync::LazyLock; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; @@ -1887,6 +1887,31 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> ) }); +pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_attempts", + "Count of times data column reconstruction has been attempted", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_failures", + "Count of times data column reconstruction has failed", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "kzg_data_column_reconstruction_incomplete_total", + "Count of times data column reconstruction attempts did not result in an import", + &["reason"], + ) + }); + /* * light_client server metrics */ diff --git a/beacon_node/beacon_chain/src/test_utils.rs 
b/beacon_node/beacon_chain/src/test_utils.rs index ce36c8ca216..9be3b4cc2f9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -43,13 +43,15 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::{o, Drain, Logger}; use slog_async::Async; -use slog_term::{FullFormat, TermDecorator}; +use slog_term::{FullFormat, PlainSyncDecorator, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::fs::{File, OpenOptions}; +use std::io::BufWriter; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; @@ -68,6 +70,8 @@ use types::{typenum::U4294967296, *}; pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +// Environment variable to read if `ci_logger` feature is enabled. +pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; // Default target aggregators to set during testing, this ensures an aggregator at each slot. // @@ -2750,15 +2754,55 @@ pub struct MakeAttestationOptions { pub fork: Fork, } -pub fn build_log(level: slog::Level, enabled: bool) -> Logger { - let decorator = TermDecorator::new().build(); - let drain = FullFormat::new(decorator).build().fuse(); - let drain = Async::new(drain).build().fuse(); +pub enum LoggerType { + Test, + // The logs are output to files for each test. + CI, + // No logs will be printed. 
+ Null, +} - if enabled { - Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - Logger::root(drain.filter(|_| false).fuse(), o!()) +fn ci_decorator() -> PlainSyncDecorator> { + let log_dir = std::env::var(CI_LOGGER_DIR_ENV_VAR).unwrap_or_else(|e| { + panic!("{CI_LOGGER_DIR_ENV_VAR} env var must be defined when using ci_logger: {e:?}"); + }); + let fork_name = std::env::var(FORK_NAME_ENV_VAR) + .map(|s| format!("{s}_")) + .unwrap_or_default(); + // The current test name can be got via the thread name. + let test_name = std::thread::current() + .name() + .unwrap() + .to_string() + // Colons are not allowed in files that are uploaded to GitHub Artifacts. + .replace("::", "_"); + let log_path = format!("/{log_dir}/{fork_name}{test_name}.log"); + let file = OpenOptions::new() + .create(true) + .append(true) + .open(log_path) + .unwrap(); + let file = BufWriter::new(file); + PlainSyncDecorator::new(file) +} + +pub fn build_log(level: slog::Level, logger_type: LoggerType) -> Logger { + match logger_type { + LoggerType::Test => { + let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); + Logger::root(drain.filter_level(level).fuse(), o!()) + } + LoggerType::CI => { + let drain = FullFormat::new(ci_decorator()).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); + Logger::root(drain.filter_level(level).fuse(), o!()) + } + LoggerType::Null => { + let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); + let drain = Async::new(drain).build().fuse(); + Logger::root(drain.filter(|_| false).fuse(), o!()) + } } } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index e1f2cbb284f..0b121356b9d 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -86,7 +86,7 @@ async fn 
produces_attestations_from_attestation_simulator_service() { let expected_miss_metrics_count = 0; let expected_hit_metrics_count = num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64; - lighthouse_metrics::gather().iter().for_each(|mf| { + metrics::gather().iter().for_each(|mf| { if hit_prometheus_metrics.contains(&mf.get_name()) { assert_eq!( mf.get_metric()[0].get_counter().get_value() as u64, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index f3b25ed5ce4..e168cbb6f4d 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -359,22 +359,24 @@ impl GossipTester { } pub fn earliest_valid_attestation_slot(&self) -> Slot { - let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - // Subtract an additional slot since the harness will be exactly on the start of the - // slot and the propagation tolerance will allow an extra slot. 
- E::slots_per_epoch() + 1 - } + let offset = if self + .harness + .spec + .fork_name_at_epoch(self.epoch()) + .deneb_enabled() + { // EIP-7045 - ForkName::Deneb | ForkName::Electra => { - let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); - if epoch_slot_offset != 0 { - E::slots_per_epoch() + epoch_slot_offset - } else { - // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier - 2 * E::slots_per_epoch() - } + let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); + if epoch_slot_offset != 0 { + E::slots_per_epoch() + epoch_slot_offset + } else { + // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier + 2 * E::slots_per_epoch() } + } else { + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. + E::slots_per_epoch() + 1 }; self.slot() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index dd195048e87..1325875a275 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,20 +1,14 @@ #![cfg(not(debug_assertions))] -use beacon_chain::otb_verification_service::{ - load_optimistic_transition_blocks, validate_optimistic_transition_blocks, - OptimisticTransitionBlock, -}; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, - test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkchoiceState, 
PayloadAttributes, }; use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; @@ -1270,552 +1264,6 @@ async fn attesting_to_optimistic_head() { get_aggregated_by_slot_and_root().unwrap(); } -/// A helper struct to build out a chain of some configurable length which undergoes the merge -/// transition. -struct OptimisticTransitionSetup { - blocks: Vec>>, - execution_block_generator: ExecutionBlockGenerator, -} - -impl OptimisticTransitionSetup { - async fn new(num_blocks: usize, ttd: u64) -> Self { - let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(ttd); - let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); - rig.move_to_terminal_block(); - - let mut blocks = Vec::with_capacity(num_blocks); - for _ in 0..num_blocks { - let root = rig.import_block(Payload::Valid).await; - let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap(); - blocks.push(Arc::new(block)); - } - - let execution_block_generator = rig - .harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .execution_block_generator() - .clone(); - - Self { - blocks, - execution_block_generator, - } - } -} - -/// Build a chain which has optimistically imported a transition block. -/// -/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the -/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs. -async fn build_optimistic_chain( - block_ttd: u64, - rig_ttd: u64, - num_blocks: usize, -) -> InvalidPayloadRig { - let OptimisticTransitionSetup { - blocks, - execution_block_generator, - } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await; - // Build a brand-new testing harness. We will apply the blocks from the previous harness to - // this one. 
- let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(rig_ttd); - let rig = InvalidPayloadRig::new_with_spec(spec); - - let spec = &rig.harness.chain.spec; - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - - // Ensure all the execution blocks from the first rig are available in the second rig. - *mock_execution_layer.server.execution_block_generator() = execution_block_generator; - - // Make the execution layer respond `SYNCING` to all `newPayload` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_new_payload(true); - // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_forkchoice_updated(); - // Make the execution layer respond `None` to all `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - let current_slot = std::cmp::max( - blocks[0].slot() + spec.safe_slots_to_import_optimistically, - num_blocks.into(), - ); - rig.harness.set_current_slot(current_slot); - - for block in blocks { - rig.harness - .chain - .process_block( - block.canonical_root(), - block, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await - .unwrap(); - } - - rig.harness.chain.recompute_head_at_current_slot().await; - - // Make the execution layer respond normally to `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
- let pre_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let pre_transition_block = rig - .harness - .chain - .get_block(&pre_transition_block_root) - .await - .unwrap() - .unwrap(); - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - assert_eq!( - pre_transition_block_root, - post_transition_block.parent_root(), - "the blocks form a single chain" - ); - assert!( - pre_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has not* undergone the merge transition" - ); - assert!( - !post_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has* undergone the merge transition" - ); - - // Assert that the transition block was optimistically imported. - // - // Note: we're using the "fallback" check for optimistic status, so if the block was - // pre-finality then we'll just use the optimistic status of the finalized block. - assert!( - rig.harness - .chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&post_transition_block_root) - .unwrap(), - "the transition block should be imported optimistically" - ); - - // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - rig -} - -#[tokio::test] -async fn optimistic_transition_block_valid_unfinalized() { - let ttd = 42; - let num_blocks = 16_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_valid_finalized() { - let ttd = 42; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let 
post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should 
load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), 
- "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a - // syncing EE. - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - // It should still be marked as optimistic. - assert!(rig - .execution_status(post_transition_block_root) - .is_strictly_optimistic()); - - // the optimistic merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The optimistic merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_finalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - 
// No shutdown should've been triggered yet. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should invalidate merge transition block and shutdown the client"); - - // The beacon chain should have triggered a shutdown. - assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - )] - ); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - /// Helper for running tests where we generate a chain with an invalid head and then a /// `fork_block` to recover it. 
struct InvalidHeadSetup { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 5d83d65efd2..a241d752fc2 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -112,19 +112,8 @@ async fn light_client_bootstrap_test() { return; }; - let checkpoint_slot = Slot::new(E::slots_per_epoch() * 6); let db_path = tempdir().unwrap(); - let log = test_logger(); - - let seconds_per_slot = spec.seconds_per_slot; - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); + let store = get_store_generic(&db_path, StoreConfig::default(), spec.clone()); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let num_initial_slots = E::slots_per_epoch() * 7; @@ -142,78 +131,8 @@ async fn light_client_bootstrap_test() { ) .await; - let wss_block_root = harness + let finalized_checkpoint = harness .chain - .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness - .chain - .store - .get_full_block(&wss_block_root) - .unwrap() - .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); - let wss_state = store - .get_state(&wss_state_root, Some(checkpoint_slot)) - .unwrap() - .unwrap(); - - let kzg = get_kzg(&spec); - - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); - - // Initialise a new beacon chain from the finalized checkpoint. - // The slot clock must be set to a time ahead of the checkpoint state. 
- let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - - let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) - .store(store.clone()) - .custom_spec(test_spec::().into()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state( - wss_state, - wss_block.clone(), - wss_blobs_opt.clone(), - genesis_state, - ) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .execution_layer(Some(mock.el)) - .build() - .expect("should build"); - - let current_state = harness.get_current_state(); - - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - - let finalized_checkpoint = beacon_chain .canonical_head .cached_head() .finalized_checkpoint(); @@ -248,19 +167,8 @@ async fn light_client_updates_test() { }; let num_final_blocks = E::slots_per_epoch() * 2; - let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let db_path = tempdir().unwrap(); - let log = test_logger(); - - let seconds_per_slot = spec.seconds_per_slot; - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); + let store = get_store_generic(&db_path, StoreConfig::default(), test_spec::()); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let num_initial_slots = E::slots_per_epoch() * 
10; @@ -276,33 +184,6 @@ async fn light_client_updates_test() { ) .await; - let wss_block_root = harness - .chain - .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness - .chain - .store - .get_full_block(&wss_block_root) - .unwrap() - .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); - let wss_state = store - .get_state(&wss_state_root, Some(checkpoint_slot)) - .unwrap() - .unwrap(); - - let kzg = get_kzg(&spec); - - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); - harness.advance_slot(); harness .extend_chain_with_light_client_data( @@ -312,52 +193,8 @@ async fn light_client_updates_test() { ) .await; - // Initialise a new beacon chain from the finalized checkpoint. - // The slot clock must be set to a time ahead of the checkpoint state. 
- let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - - let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) - .store(store.clone()) - .custom_spec(test_spec::().into()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state( - wss_state, - wss_block.clone(), - wss_blobs_opt.clone(), - genesis_state, - ) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .execution_layer(Some(mock.el)) - .build() - .expect("should build"); - - let beacon_chain = Arc::new(beacon_chain); - let current_state = harness.get_current_state(); - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) @@ -366,7 +203,8 @@ async fn light_client_updates_test() { // fetch a range of light client updates. right now there should only be one light client update // in the db. 
- let lc_updates = beacon_chain + let lc_updates = harness + .chain .get_light_client_updates(sync_period, 100) .unwrap(); @@ -386,7 +224,8 @@ async fn light_client_updates_test() { .await; // we should now have two light client updates in the db - let lc_updates = beacon_chain + let lc_updates = harness + .chain .get_light_client_updates(sync_period, 100) .unwrap(); @@ -2514,7 +2353,7 @@ async fn pruning_test( } #[tokio::test] -async fn garbage_collect_temp_states_from_failed_block() { +async fn garbage_collect_temp_states_from_failed_block_on_startup() { let db_path = tempdir().unwrap(); // Wrap these functions to ensure the variables are dropped before we try to open another @@ -2571,6 +2410,61 @@ async fn garbage_collect_temp_states_from_failed_block() { assert_eq!(store.iter_temporary_state_roots().count(), 0); } +#[tokio::test] +async fn garbage_collect_temp_states_from_failed_block_on_finalization() { + let db_path = tempdir().unwrap(); + + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let slots_per_epoch = E::slots_per_epoch(); + + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; + + let (mut block, _) = (*signed_block).clone().deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = Arc::new(block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + )); + + // The block should be rejected, but should store a bunch of temporary states. 
+ harness.set_current_slot(block_slot); + harness + .process_block_result((block, None)) + .await + .unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + + // Finalize the chain without the block, which should result in pruning of all temporary states. + let blocks_required_to_finalize = 3 * slots_per_epoch; + harness.advance_slot(); + harness + .extend_chain( + blocks_required_to_finalize as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Check that the finalization migration ran. + assert_ne!(store.get_split_slot(), 0); + + // Check that temporary states have been pruned. + assert_eq!(store.iter_temporary_state_roots().count(), 0); +} + #[tokio::test] async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; @@ -2775,9 +2669,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Forwards iterator from 0 should fail as we lack blocks. assert!(matches!( beacon_chain.forwards_iter_block_roots(Slot::new(0)), - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { .. } - )) + Err(BeaconChainError::HistoricalBlockOutOfRange { .. }) )); // Simulate processing of a `StatusMessage` with an older finalized epoch by calling @@ -2845,7 +2737,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .import_historical_block_batch(batch_with_invalid_first_block) .unwrap_err(), - BeaconChainError::HistoricalBlockError(HistoricalBlockError::InvalidSignature) + HistoricalBlockError::InvalidSignature )); // Importing the batch with valid signatures should succeed. 
diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 554010be07b..9273137bf6d 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -16,7 +16,7 @@ task_executor = { workspace = true } slot_clock = { workspace = true } lighthouse_network = { workspace = true } types = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } num_cpus = { workspace = true } serde = { workspace = true } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index cd5a1d6cff0..2a69b04c916 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -93,6 +93,11 @@ const DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * DEFAULT_MAX_WORK_EVENT_Q /// slightly, we don't need to adjust the queues during the lifetime of a process. const ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT: usize = 110; +/// Minimum size of dynamically sized queues. Due to integer division we don't want 0 length queues +/// as the processor won't process that message type. 128 is an arbitrary value value >= 1 that +/// seems reasonable. 
+const MIN_QUEUE_LEN: usize = 128; + /// Maximum number of queued items that will be stored before dropping them pub struct BeaconProcessorQueueLengths { aggregate_queue: usize, @@ -130,6 +135,7 @@ pub struct BeaconProcessorQueueLengths { lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, lc_finality_update_queue: usize, + lc_update_range_queue: usize, api_request_p0_queue: usize, api_request_p1_queue: usize, } @@ -155,9 +161,15 @@ impl BeaconProcessorQueueLengths { aggregate_queue: 4096, unknown_block_aggregate_queue: 1024, // Capacity for a full slot's worth of attestations if subscribed to all subnets - attestation_queue: active_validator_count / slots_per_epoch, + attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), // Capacity for a full slot's worth of attestations if subscribed to all subnets - unknown_block_attestation_queue: active_validator_count / slots_per_epoch, + unknown_block_attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), sync_message_queue: 2048, sync_contribution_queue: 1024, gossip_voluntary_exit_queue: 4096, @@ -191,6 +203,7 @@ impl BeaconProcessorQueueLengths { lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, lc_finality_update_queue: 512, + lc_update_range_queue: 512, api_request_p0_queue: 1024, api_request_p1_queue: 1024, }) @@ -611,6 +624,7 @@ pub enum Work { LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), LightClientFinalityUpdateRequest(BlockingFn), + LightClientUpdatesByRangeRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), ApiRequestP1(BlockingOrAsync), } @@ -662,6 +676,7 @@ pub enum WorkType { LightClientBootstrapRequest, LightClientOptimisticUpdateRequest, LightClientFinalityUpdateRequest, + LightClientUpdatesByRangeRequest, ApiRequestP0, ApiRequestP1, } @@ -712,6 +727,7 @@ impl Work { WorkType::LightClientOptimisticUpdateRequest } Work::LightClientFinalityUpdateRequest(_) => 
WorkType::LightClientFinalityUpdateRequest, + Work::LightClientUpdatesByRangeRequest(_) => WorkType::LightClientUpdatesByRangeRequest, Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, @@ -891,6 +907,7 @@ impl BeaconProcessor { let mut lc_optimistic_update_queue = FifoQueue::new(queue_lengths.lc_optimistic_update_queue); let mut lc_finality_update_queue = FifoQueue::new(queue_lengths.lc_finality_update_queue); + let mut lc_update_range_queue = FifoQueue::new(queue_lengths.lc_update_range_queue); let mut api_request_p0_queue = FifoQueue::new(queue_lengths.api_request_p0_queue); let mut api_request_p1_queue = FifoQueue::new(queue_lengths.api_request_p1_queue); @@ -1368,6 +1385,9 @@ impl BeaconProcessor { Work::LightClientFinalityUpdateRequest { .. } => { lc_finality_update_queue.push(work, work_id, &self.log) } + Work::LightClientUpdatesByRangeRequest { .. } => { + lc_update_range_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. 
} => { unknown_block_attestation_queue.push(work) } @@ -1459,6 +1479,7 @@ impl BeaconProcessor { WorkType::LightClientFinalityUpdateRequest => { lc_finality_update_queue.len() } + WorkType::LightClientUpdatesByRangeRequest => lc_update_range_queue.len(), WorkType::ApiRequestP0 => api_request_p0_queue.len(), WorkType::ApiRequestP1 => api_request_p1_queue.len(), }; @@ -1611,7 +1632,8 @@ impl BeaconProcessor { | Work::GossipBlsToExecutionChange(process_fn) | Work::LightClientBootstrapRequest(process_fn) | Work::LightClientOptimisticUpdateRequest(process_fn) - | Work::LightClientFinalityUpdateRequest(process_fn) => { + | Work::LightClientFinalityUpdateRequest(process_fn) + | Work::LightClientUpdatesByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } }; @@ -1686,3 +1708,21 @@ impl Drop for SendOnDrop { } } } + +#[cfg(test)] +mod tests { + use super::*; + use types::{BeaconState, ChainSpec, Eth1Data, ForkName, MainnetEthSpec}; + + #[test] + fn min_queue_len() { + // State with no validators. 
+ let spec = ForkName::latest().make_genesis_spec(ChainSpec::mainnet()); + let genesis_time = 0; + let state = BeaconState::::new(genesis_time, Eth1Data::default(), &spec); + assert_eq!(state.validators().len(), 0); + let queue_lengths = BeaconProcessorQueueLengths::from_state(&state, &spec).unwrap(); + assert_eq!(queue_lengths.attestation_queue, MIN_QUEUE_LEN); + assert_eq!(queue_lengths.unknown_block_attestation_queue, MIN_QUEUE_LEN); + } +} diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 0a7bdba18d1..fc8c712f4e7 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 06f7763c8a4..21a6e42cc50 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -33,7 +33,7 @@ sensitive_url = { workspace = true } genesis = { workspace = true } task_executor = { workspace = true } environment = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } time = "0.3.5" directory = { workspace = true } http_api = { workspace = true } diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index ebc4fe70a71..e5c07baddc2 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SYNC_SLOTS_PER_SECOND: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 632188014eb..839d296c768 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -436,7 +436,7 @@ async fn capella_readiness_logging( .snapshot .beacon_state .fork_name_unchecked() - >= ForkName::Capella; + .capella_enabled(); 
let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -496,7 +496,7 @@ async fn deneb_readiness_logging( .snapshot .beacon_state .fork_name_unchecked() - >= ForkName::Deneb; + .deneb_enabled(); let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 4910cfd2e1b..50400a77e06 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,7 +25,7 @@ logging = { workspace = true } superstruct = { workspace = true } tokio = { workspace = true } state_processing = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } task_executor = { workspace = true } eth2 = { workspace = true } sensitive_url = { workspace = true } diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index 9a11e7a6920..1df4ba0df9a 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index a70a927307d..71ab98a6a20 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -549,10 +549,11 @@ impl Service { /// Returns the number of deposits with valid signatures that have been observed. pub fn get_valid_signature_count(&self) -> Option { + let highest_safe_block = self.highest_safe_block()?; self.deposits() .read() .cache - .get_valid_signature_count(self.highest_safe_block()?) 
+ .get_valid_signature_count(highest_safe_block) } /// Returns the number of deposits with valid signatures that have been observed, without diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 93d8086149d..0ef101fae7c 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -35,7 +35,7 @@ slot_clock = { workspace = true } tempfile = { workspace = true } rand = { workspace = true } zeroize = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } ethers-core = { workspace = true } builder_client = { path = "../builder_client" } fork_choice = { workspace = true } @@ -52,3 +52,4 @@ alloy-rlp = { workspace = true } alloy-consensus = { workspace = true } lighthouse_version = { workspace = true } fixed_bytes = { workspace = true } +sha2 = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 8ba8ecfffbc..1c23c8ba665 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -2,8 +2,7 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, }; @@ -18,7 +17,6 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; -use types::execution_payload::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; pub 
use types::{ Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, @@ -26,7 +24,7 @@ pub use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, KzgProofs, + ExecutionPayloadElectra, ExecutionRequests, KzgProofs, }; use types::{Graffiti, GRAFFITI_BYTES_LEN}; @@ -288,6 +286,8 @@ pub struct GetPayloadResponse { pub blobs_bundle: BlobsBundle, #[superstruct(only(Deneb, Electra), partial_getter(copy))] pub should_override_builder: bool, + #[superstruct(only(Electra))] + pub requests: ExecutionRequests, } impl GetPayloadResponse { @@ -321,7 +321,12 @@ impl From> for ExecutionPayload { } impl From> - for (ExecutionPayload, Uint256, Option>) + for ( + ExecutionPayload, + Uint256, + Option>, + Option>, + ) { fn from(response: GetPayloadResponse) -> Self { match response { @@ -329,21 +334,25 @@ impl From> ExecutionPayload::Bellatrix(inner.execution_payload), inner.block_value, None, + None, ), GetPayloadResponse::Capella(inner) => ( ExecutionPayload::Capella(inner.execution_payload), inner.block_value, None, + None, ), GetPayloadResponse::Deneb(inner) => ( ExecutionPayload::Deneb(inner.execution_payload), inner.block_value, Some(inner.blobs_bundle), + None, ), GetPayloadResponse::Electra(inner) => ( ExecutionPayload::Electra(inner.execution_payload), inner.block_value, Some(inner.blobs_bundle), + Some(inner.requests), ), } } @@ -360,106 +369,25 @@ impl GetPayloadResponse { } } -#[superstruct( - variants(V1, V2), - variant_attributes(derive(Clone, Debug),), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] #[derive(Clone, Debug)] -pub struct ExecutionPayloadBody { +pub struct ExecutionPayloadBodyV1 { pub transactions: Transactions, pub withdrawals: Option>, - #[superstruct(only(V2))] - pub deposit_requests: Option>, - #[superstruct(only(V2))] - 
pub withdrawal_requests: Option>, - #[superstruct(only(V2))] - pub consolidation_requests: Option>, } -impl ExecutionPayloadBody { - #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Transactions, - Option>, - Option>, - Option>, - Option>, - ) { - match self { - ExecutionPayloadBody::V1(body) => { - (body.transactions, body.withdrawals, None, None, None) - } - ExecutionPayloadBody::V2(body) => ( - body.transactions, - body.withdrawals, - body.deposit_requests, - body.withdrawal_requests, - body.consolidation_requests, - ), - } - } +impl ExecutionPayloadBodyV1 { pub fn to_payload( self, header: ExecutionPayloadHeader, ) -> Result, String> { - let header_fork = header.fork_name_unchecked(); - match &self { - Self::V1(_) => { - if header_fork.electra_enabled() { + match header { + ExecutionPayloadHeader::Bellatrix(header) => { + if self.withdrawals.is_some() { return Err(format!( - "block {} is {} but response is ExecutionPayloadBodyV1. Does the EL support {}?", - header.block_hash(), - header_fork, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, + "block {} is bellatrix but payload body has withdrawals", + header.block_hash )); } - } - Self::V2(_) => {} - } - - let ( - transactions, - withdrawals, - deposit_requests, - withdrawal_requests, - consolidation_requests, - ) = self.deconstruct(); - if !header_fork.capella_enabled() && withdrawals.is_some() { - return Err(format!( - "block {} is {} but payload body has withdrawals", - header.block_hash(), - header_fork - )); - } - if !header_fork.electra_enabled() { - if deposit_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has deposit_requests", - header.block_hash(), - header_fork - )); - } - if withdrawal_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has withdrawal_requests", - header.block_hash(), - header_fork - )); - } - if consolidation_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has 
consolidation_requests", - header.block_hash(), - header_fork - )); - } - } - - match header { - ExecutionPayloadHeader::Bellatrix(header) => { Ok(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: header.parent_hash, fee_recipient: header.fee_recipient, @@ -474,108 +402,90 @@ impl ExecutionPayloadBody { extra_data: header.extra_data, base_fee_per_gas: header.base_fee_per_gas, block_hash: header.block_hash, - transactions, + transactions: self.transactions, })) } ExecutionPayloadHeader::Capella(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + })) + } else { + Err(format!( + "block {} is capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } 
ExecutionPayloadHeader::Deneb(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } ExecutionPayloadHeader::Electra(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - let deposit_requests = deposit_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has deposit_requests set 
to null", - header.block_hash, header_fork - ) - })?; - let withdrawal_requests = withdrawal_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawal_requests set to null", - header.block_hash, header_fork - ) - })?; - let consolidation_requests = consolidation_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has consolidation_requests set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - deposit_requests, - withdrawal_requests, - consolidation_requests, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } } } @@ -592,8 +502,6 @@ pub 
struct EngineCapabilities { pub forkchoice_updated_v3: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, - pub get_payload_bodies_by_hash_v2: bool, - pub get_payload_bodies_by_range_v2: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, @@ -631,12 +539,6 @@ impl EngineCapabilities { if self.get_payload_bodies_by_range_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1); } - if self.get_payload_bodies_by_hash_v2 { - response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2); - } - if self.get_payload_bodies_by_range_v2 { - response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2); - } if self.get_payload_v1 { response.push(ENGINE_GET_PAYLOAD_V1); } @@ -668,6 +570,7 @@ pub enum ClientCode { Lodestar, Nethermind, Nimbus, + TrinExecution, Teku, Prysm, Reth, @@ -686,6 +589,7 @@ impl std::fmt::Display for ClientCode { ClientCode::Lodestar => "LS", ClientCode::Nethermind => "NM", ClientCode::Nimbus => "NB", + ClientCode::TrinExecution => "TE", ClientCode::Teku => "TK", ClientCode::Prysm => "PM", ClientCode::Reth => "RH", @@ -709,6 +613,7 @@ impl TryFrom for ClientCode { "LS" => Ok(Self::Lodestar), "NM" => Ok(Self::Nethermind), "NB" => Ok(Self::Nimbus), + "TE" => Ok(Self::TrinExecution), "TK" => Ok(Self::Teku), "PM" => Ok(Self::Prysm), "RH" => Ok(Self::Reth), diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c497a4a7254..9c2c43bcf7c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -50,8 +50,6 @@ pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; -pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2: &str = "engine_getPayloadBodiesByHashV2"; -pub const 
ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2: &str = "engine_getPayloadBodiesByRangeV2"; pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; @@ -80,8 +78,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_CLIENT_VERSION_V1, ]; @@ -797,6 +793,9 @@ impl HttpJsonRpc { JsonExecutionPayload::V4(new_payload_request_electra.execution_payload.clone().into()), new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, + new_payload_request_electra + .execution_requests_list + .get_execution_requests_list(), ]); let response: JsonPayloadStatusV1 = self @@ -849,7 +848,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V1(response).into()) + JsonGetPayloadResponse::V1(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Capella => { let response: JsonGetPayloadResponseV2 = self @@ -859,7 +860,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V2(response).into()) + JsonGetPayloadResponse::V2(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair | ForkName::Deneb | ForkName::Electra => Err( Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), @@ -883,7 +886,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V3(response).into()) + JsonGetPayloadResponse::V3(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair @@ -912,7 +917,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * 
self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V4(response).into()) + JsonGetPayloadResponse::V4(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair @@ -991,7 +998,7 @@ impl HttpJsonRpc { pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let params = json!([block_hashes]); let response: Vec>> = self @@ -1004,27 +1011,7 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) - .collect()) - } - - pub async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> Result>>, Error> { - let params = json!([block_hashes]); - - let response: Vec>> = self - .rpc_request( - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - params, - ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response - .into_iter() - .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) + .map(|opt_json| opt_json.map(From::from)) .collect()) } @@ -1032,7 +1019,7 @@ impl HttpJsonRpc { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); @@ -1048,31 +1035,7 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) - .collect()) - } - - pub async fn get_payload_bodies_by_range_v2( - &self, - start: u64, - count: u64, - ) -> Result>>, Error> { - #[derive(Serialize)] - #[serde(transparent)] - struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); - - let params = json!([Quantity(start), Quantity(count)]); - let response: Vec>> = self - .rpc_request( - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, - params, - ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response - .into_iter() - 
.map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) + .map(|opt_json| opt_json.map(From::from)) .collect()) } @@ -1099,10 +1062,6 @@ impl HttpJsonRpc { .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), - get_payload_bodies_by_hash_v2: capabilities - .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2), - get_payload_bodies_by_range_v2: capabilities - .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), @@ -1278,39 +1237,6 @@ impl HttpJsonRpc { } } - pub async fn get_payload_bodies_by_hash( - &self, - block_hashes: Vec, - ) -> Result>>, Error> { - let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_bodies_by_hash_v2 { - self.get_payload_bodies_by_hash_v2(block_hashes).await - } else if engine_capabilities.get_payload_bodies_by_hash_v1 { - self.get_payload_bodies_by_hash_v1(block_hashes).await - } else { - Err(Error::RequiredMethodUnsupported( - "engine_getPayloadBodiesByHash", - )) - } - } - - pub async fn get_payload_bodies_by_range( - &self, - start: u64, - count: u64, - ) -> Result>>, Error> { - let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_bodies_by_range_v2 { - self.get_payload_bodies_by_range_v2(start, count).await - } else if engine_capabilities.get_payload_bodies_by_range_v1 { - self.get_payload_bodies_by_range_v1(start, count).await - } else { - Err(Error::RequiredMethodUnsupported( - "engine_getPayloadBodiesByRange", - )) - } - } - // automatically selects the latest version of // forkchoice_updated that the execution engine supports pub async fn forkchoice_updated( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs 
b/beacon_node/execution_layer/src/engine_api/json_structures.rs index a05d584cfca..753554c149a 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,11 +1,13 @@ use super::*; use alloy_rlp::RlpEncodable; use serde::{Deserialize, Serialize}; +use ssz::Decode; use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::{DepositRequest, FixedVector, PublicKeyBytes, Signature, Unsigned, WithdrawalRequest}; +use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; +use types::{FixedVector, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -104,14 +106,6 @@ pub struct JsonExecutionPayload { #[superstruct(only(V3, V4))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, - #[superstruct(only(V4))] - pub deposit_requests: VariableList, - #[superstruct(only(V4))] - pub withdrawal_requests: - VariableList, - #[superstruct(only(V4))] - pub consolidation_requests: - VariableList, } impl From> for JsonExecutionPayloadV1 { @@ -214,24 +208,6 @@ impl From> for JsonExecutionPayloadV4 .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests: payload - .deposit_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - withdrawal_requests: payload - .withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - consolidation_requests: payload - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), } } } @@ -348,24 +324,6 @@ impl From> for ExecutionPayloadElectra .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests: payload - .deposit_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - withdrawal_requests: payload - 
.withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - consolidation_requests: payload - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), } } } @@ -381,6 +339,71 @@ impl From> for ExecutionPayload { } } +/// This is used to index into the `execution_requests` array. +#[derive(Debug, Copy, Clone)] +enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } +} + +/// Format of `ExecutionRequests` received over the engine api. +/// +/// Array of ssz-encoded requests list encoded as hex bytes. +/// The prefix of the request type is used to index into the array. +/// +/// For e.g. [0xab, 0xcd, 0xef] +/// Here, 0xab are the deposits bytes (prefix and index == 0) +/// 0xcd are the withdrawals bytes (prefix and index == 1) +/// 0xef are the consolidations bytes (prefix and index == 2) +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct JsonExecutionRequests(pub Vec); + +impl TryFrom for ExecutionRequests { + type Error = String; + + fn try_from(value: JsonExecutionRequests) -> Result { + let mut requests = ExecutionRequests::default(); + + for (i, request) in value.0.into_iter().enumerate() { + // hex string + let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?; + match RequestPrefix::from_prefix(i as u8) { + Some(RequestPrefix::Deposit) => { + requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) + .map_err(|e| format!("Failed to decode DepositRequest from EL: {:?}", e))?; + } + Some(RequestPrefix::Withdrawal) => { + requests.withdrawals = WithdrawalRequests::::from_ssz_bytes(&decoded_bytes) + .map_err(|e| { + format!("Failed to decode WithdrawalRequest from EL: {:?}", e) + })?; + } + 
Some(RequestPrefix::Consolidation) => { + requests.consolidations = + ConsolidationRequests::::from_ssz_bytes(&decoded_bytes).map_err( + |e| format!("Failed to decode ConsolidationRequest from EL: {:?}", e), + )?; + } + None => return Err("Empty requests string".to_string()), + } + } + Ok(requests) + } +} + #[superstruct( variants(V1, V2, V3, V4), variant_attributes( @@ -407,38 +430,42 @@ pub struct JsonGetPayloadResponse { pub blobs_bundle: JsonBlobsBundleV1, #[superstruct(only(V3, V4))] pub should_override_builder: bool, + #[superstruct(only(V4))] + pub requests: JsonExecutionRequests, } -impl From> for GetPayloadResponse { - fn from(json_get_payload_response: JsonGetPayloadResponse) -> Self { +impl TryFrom> for GetPayloadResponse { + type Error = String; + fn try_from(json_get_payload_response: JsonGetPayloadResponse) -> Result { match json_get_payload_response { JsonGetPayloadResponse::V1(response) => { - GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix { + Ok(GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix { execution_payload: response.execution_payload.into(), block_value: response.block_value, - }) + })) } JsonGetPayloadResponse::V2(response) => { - GetPayloadResponse::Capella(GetPayloadResponseCapella { + Ok(GetPayloadResponse::Capella(GetPayloadResponseCapella { execution_payload: response.execution_payload.into(), block_value: response.block_value, - }) + })) } JsonGetPayloadResponse::V3(response) => { - GetPayloadResponse::Deneb(GetPayloadResponseDeneb { + Ok(GetPayloadResponse::Deneb(GetPayloadResponseDeneb { execution_payload: response.execution_payload.into(), block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - }) + })) } JsonGetPayloadResponse::V4(response) => { - GetPayloadResponse::Electra(GetPayloadResponseElectra { + Ok(GetPayloadResponse::Electra(GetPayloadResponseElectra { execution_payload: response.execution_payload.into(), block_value: 
response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - }) + requests: response.requests.try_into()?, + })) } } } @@ -754,36 +781,20 @@ impl From for JsonForkchoiceUpdatedV1Response { } } -#[superstruct( - variants(V1, V2), - variant_attributes( - derive(Clone, Debug, Serialize, Deserialize), - serde(bound = "E: EthSpec", rename_all = "camelCase"), - ), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] -#[derive(Clone, Debug, Serialize)] -#[serde(bound = "E: EthSpec", rename_all = "camelCase", untagged)] -pub struct JsonExecutionPayloadBody { +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct JsonExecutionPayloadBodyV1 { #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, pub withdrawals: Option>, - #[superstruct(only(V2))] - pub deposit_requests: Option>, - #[superstruct(only(V2))] - pub withdrawal_requests: - Option>, - #[superstruct(only(V2))] - pub consolidation_requests: - Option>, } -impl From> for JsonExecutionPayloadBodyV1 { - fn from(value: ExecutionPayloadBodyV1) -> Self { +impl From> for ExecutionPayloadBodyV1 { + fn from(value: JsonExecutionPayloadBodyV1) -> Self { Self { transactions: value.transactions, withdrawals: value.withdrawals.map(|json_withdrawals| { - VariableList::from( + Withdrawals::::from( json_withdrawals .into_iter() .map(Into::into) @@ -794,82 +805,12 @@ impl From> for JsonExecutionPayloadBodyV1< } } -impl From> for JsonExecutionPayloadBodyV2 { - fn from(value: ExecutionPayloadBodyV2) -> Self { +impl From> for JsonExecutionPayloadBodyV1 { + fn from(value: ExecutionPayloadBodyV1) -> Self { Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - VariableList::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - deposit_requests: value.deposit_requests.map(|receipts| { - 
VariableList::from(receipts.into_iter().map(Into::into).collect::>()) - }), - withdrawal_requests: value.withdrawal_requests.map(|withdrawal_requests| { - VariableList::from( - withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - consolidation_requests: value.consolidation_requests.map(|consolidation_requests| { - VariableList::from( - consolidation_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - } - } -} - -impl From> for ExecutionPayloadBody { - fn from(value: JsonExecutionPayloadBody) -> Self { - match value { - JsonExecutionPayloadBody::V1(body_v1) => Self::V1(ExecutionPayloadBodyV1 { - transactions: body_v1.transactions, - withdrawals: body_v1.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - }), - JsonExecutionPayloadBody::V2(body_v2) => Self::V2(ExecutionPayloadBodyV2 { - transactions: body_v2.transactions, - withdrawals: body_v2.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - deposit_requests: body_v2.deposit_requests.map(|json_receipts| { - DepositRequests::::from( - json_receipts - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - withdrawal_requests: body_v2.withdrawal_requests.map(|json_withdrawal_requests| { - WithdrawalRequests::::from( - json_withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - consolidation_requests: body_v2.consolidation_requests, + withdrawals: value.withdrawals.map(|withdrawals| { + VariableList::from(withdrawals.into_iter().map(Into::into).collect::>()) }), } } @@ -950,96 +891,3 @@ impl TryFrom for ClientVersionV1 { }) } } - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonDepositRequest { - pub pubkey: PublicKeyBytes, - pub withdrawal_credentials: Hash256, - #[serde(with = 
"serde_utils::u64_hex_be")] - pub amount: u64, - pub signature: Signature, - #[serde(with = "serde_utils::u64_hex_be")] - pub index: u64, -} - -impl From for JsonDepositRequest { - fn from(deposit: DepositRequest) -> Self { - Self { - pubkey: deposit.pubkey, - withdrawal_credentials: deposit.withdrawal_credentials, - amount: deposit.amount, - signature: deposit.signature, - index: deposit.index, - } - } -} - -impl From for DepositRequest { - fn from(json_deposit: JsonDepositRequest) -> Self { - Self { - pubkey: json_deposit.pubkey, - withdrawal_credentials: json_deposit.withdrawal_credentials, - amount: json_deposit.amount, - signature: json_deposit.signature, - index: json_deposit.index, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonWithdrawalRequest { - pub source_address: Address, - pub validator_pubkey: PublicKeyBytes, - #[serde(with = "serde_utils::u64_hex_be")] - pub amount: u64, -} - -impl From for JsonWithdrawalRequest { - fn from(withdrawal_request: WithdrawalRequest) -> Self { - Self { - source_address: withdrawal_request.source_address, - validator_pubkey: withdrawal_request.validator_pubkey, - amount: withdrawal_request.amount, - } - } -} - -impl From for WithdrawalRequest { - fn from(json_withdrawal_request: JsonWithdrawalRequest) -> Self { - Self { - source_address: json_withdrawal_request.source_address, - validator_pubkey: json_withdrawal_request.validator_pubkey, - amount: json_withdrawal_request.amount, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonConsolidationRequest { - pub source_address: Address, - pub source_pubkey: PublicKeyBytes, - pub target_pubkey: PublicKeyBytes, -} - -impl From for JsonConsolidationRequest { - fn from(consolidation_request: ConsolidationRequest) -> Self { - Self { - source_address: consolidation_request.source_address, - source_pubkey: 
consolidation_request.source_pubkey, - target_pubkey: consolidation_request.target_pubkey, - } - } -} - -impl From for ConsolidationRequest { - fn from(json_consolidation_request: JsonConsolidationRequest) -> Self { - Self { - source_address: json_consolidation_request.source_address, - source_pubkey: json_consolidation_request.source_pubkey, - target_pubkey: json_consolidation_request.target_pubkey, - } - } -} diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 8d2e3d5ad06..318779b7f3e 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -9,7 +9,7 @@ use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, + ExecutionPayloadElectra, ExecutionRequests, }; #[superstruct( @@ -43,6 +43,8 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { pub versioned_hashes: Vec, #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(Electra))] + pub execution_requests_list: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -183,6 +185,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, + execution_requests_list: &block_ref.body.execution_requests, })), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 648963a320e..f7e490233fe 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -48,7 +48,8 @@ use types::builder_bid::BuilderBid; use types::non_zero_usize::new_non_zero_usize; use types::payload::BlockProductionVersion; use types::{ - AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, 
+ AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, ExecutionRequests, KzgProofs, + SignedBlindedBeaconBlock, }; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, @@ -112,12 +113,15 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Electra(builder_bid.header).into(), block_value: builder_bid.value, kzg_commitments: builder_bid.blob_kzg_commitments, blobs_and_proofs: None, + // TODO(electra): update this with builder api returning the requests + requests: None, }, }; Ok(ProvenancedPayload::Builder( @@ -194,6 +198,8 @@ pub enum BlockProposalContents> { kzg_commitments: KzgCommitments, /// `None` for blinded `PayloadAndBlobs`. blobs_and_proofs: Option<(BlobsList, KzgProofs)>, + // TODO(electra): this should probably be a separate variant/superstruct + requests: Option>, }, } @@ -214,11 +220,13 @@ impl From>> block_value, kzg_commitments, blobs_and_proofs: _, + requests, } => BlockProposalContents::PayloadAndBlobs { payload: payload.execution_payload().into(), block_value, kzg_commitments, blobs_and_proofs: None, + requests, }, } } @@ -230,13 +238,14 @@ impl> TryFrom> type Error = Error; fn try_from(response: GetPayloadResponse) -> Result { - let (execution_payload, block_value, maybe_bundle) = response.into(); + let (execution_payload, block_value, maybe_bundle, maybe_requests) = response.into(); match maybe_bundle { Some(bundle) => Ok(Self::PayloadAndBlobs { payload: execution_payload.into(), block_value, kzg_commitments: bundle.commitments, blobs_and_proofs: Some((bundle.blobs, bundle.proofs)), + requests: maybe_requests, }), None => Ok(Self::Payload { payload: execution_payload.into(), @@ -265,22 +274,25 @@ impl> BlockProposalContents>, Option<(BlobsList, KzgProofs)>, + Option>, Uint256, ) { match self { Self::Payload { payload, block_value, - } => (payload, None, None, block_value), + } => (payload, None, None, None, block_value), 
Self::PayloadAndBlobs { payload, block_value, kzg_commitments, blobs_and_proofs, + requests, } => ( payload, Some(kzg_commitments), blobs_and_proofs, + requests, block_value, ), } @@ -1772,10 +1784,10 @@ impl ExecutionLayer { pub async fn get_payload_bodies_by_hash( &self, hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { self.engine() .request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_hash(hashes).await + engine.api.get_payload_bodies_by_hash_v1(hashes).await }) .await .map_err(Box::new) @@ -1786,11 +1798,14 @@ impl ExecutionLayer { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); self.engine() .request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_range(start, count).await + engine + .api + .get_payload_bodies_by_range_v1(start, count) + .await }) .await .map_err(Box::new) @@ -1823,9 +1838,7 @@ impl ExecutionLayer { // Use efficient payload bodies by range method if supported. 
let capabilities = self.get_engine_capabilities(None).await?; - if capabilities.get_payload_bodies_by_range_v1 - || capabilities.get_payload_bodies_by_range_v2 - { + if capabilities.get_payload_bodies_by_range_v1 { let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; if payload_bodies.len() != 1 { diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 184031af4d0..ab1a22677f3 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub const HIT: &str = "hit"; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a5960744f5c..4deb91e0567 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -652,30 +652,23 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, - // TODO(electra): consider how to test these fields below - deposit_requests: vec![].into(), - withdrawal_requests: vec![].into(), - consolidation_requests: vec![].into(), }), _ => unreachable!(), }, }; - match execution_payload.fork_name() { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => {} - ForkName::Deneb | ForkName::Electra => { - // get random number between 0 and Max Blobs - let mut rng = self.rng.lock(); - let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); - let (bundle, transactions) = generate_blobs(num_blobs)?; - for tx in Vec::from(transactions) { - execution_payload - .transactions_mut() - .push(tx) - .map_err(|_| "transactions are full".to_string())?; - } - self.blobs_bundles.insert(id, bundle); + if execution_payload.fork_name().deneb_enabled() { + 
// get random number between 0 and Max Blobs + let mut rng = self.rng.lock(); + let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); + let (bundle, transactions) = generate_blobs(num_blobs)?; + for tx in Vec::from(transactions) { + execution_payload + .transactions_mut() + .push(tx) + .map_err(|_| "transactions are full".to_string())?; } + self.blobs_bundles.insert(id, bundle); } *execution_payload.block_hash_mut() = diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f36cb9797d3..786ac9ad9c9 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -373,6 +373,8 @@ pub async fn handle_rpc( ))? .into(), should_override_builder: false, + // TODO(electra): add EL requests in mock el + requests: Default::default(), }) .unwrap() } @@ -561,60 +563,11 @@ pub async fn handle_rpc( match maybe_payload { Some(payload) => { - assert!( - !payload.fork_name().electra_enabled(), - "payload bodies V1 is not supported for Electra blocks" - ); - let payload_body = ExecutionPayloadBodyV1 { + let payload_body: ExecutionPayloadBodyV1 = ExecutionPayloadBodyV1 { transactions: payload.transactions().clone(), withdrawals: payload.withdrawals().ok().cloned(), }; - let json_payload_body = JsonExecutionPayloadBody::V1( - JsonExecutionPayloadBodyV1::::from(payload_body), - ); - response.push(Some(json_payload_body)); - } - None => response.push(None), - } - } - - Ok(serde_json::to_value(response).unwrap()) - } - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2 => { - #[derive(Deserialize)] - #[serde(transparent)] - struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); - - let start = get_param::(params, 0) - .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? - .0; - let count = get_param::(params, 1) - .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
- .0; - - let mut response = vec![]; - for block_num in start..(start + count) { - let maybe_payload = ctx - .execution_block_generator - .read() - .execution_payload_by_number(block_num); - - match maybe_payload { - Some(payload) => { - // TODO(electra): add testing for: - // deposit_requests - // withdrawal_requests - // consolidation_requests - let payload_body = ExecutionPayloadBodyV2 { - transactions: payload.transactions().clone(), - withdrawals: payload.withdrawals().ok().cloned(), - deposit_requests: payload.deposit_requests().ok().cloned(), - withdrawal_requests: payload.withdrawal_requests().ok().cloned(), - consolidation_requests: payload.consolidation_requests().ok().cloned(), - }; - let json_payload_body = JsonExecutionPayloadBody::V2( - JsonExecutionPayloadBodyV2::::from(payload_body), - ); + let json_payload_body = JsonExecutionPayloadBodyV1::from(payload_body); response.push(Some(json_payload_body)); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 1291c8cf97b..341daedbc8d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -20,9 +20,9 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, FixedBytesExtended, ForkName, ForkVersionedResponse, Hash256, - PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, - SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, FixedBytesExtended, ForkName, + ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, + SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::{Filter, Rejection}; @@ -479,16 +479,18 @@ pub fn serve( let prev_randao = head_state 
.get_randao_mix(head_state.current_epoch()) .map_err(|_| reject("couldn't get prev randao"))?; - let expected_withdrawals = match fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, - ForkName::Capella | ForkName::Deneb | ForkName::Electra => Some( + + let expected_withdrawals = if fork.capella_enabled() { + Some( builder .beacon_client .get_expected_withdrawals(&StateId::Head) .await .unwrap() .data, - ), + ) + } else { + None }; let payload_attributes = match fork { @@ -540,10 +542,12 @@ pub fn serve( let mut message = match payload_response_type { crate::GetPayloadResponseType::Full(payload_response) => { - let (payload, _block_value, maybe_blobs_bundle): ( + #[allow(clippy::type_complexity)] + let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( ExecutionPayload, Uint256, Option>, + Option>, ) = payload_response.into(); match fork { @@ -591,10 +595,12 @@ pub fn serve( } } crate::GetPayloadResponseType::Blinded(payload_response) => { - let (payload, _block_value, maybe_blobs_bundle): ( + #[allow(clippy::type_complexity)] + let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( ExecutionPayload, Uint256, Option>, + Option>, ) = payload_response.into(); match fork { ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index fe847ec3eda..be99b380543 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -47,9 +47,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { forkchoice_updated_v2: true, forkchoice_updated_v3: true, get_payload_bodies_by_hash_v1: true, - get_payload_bodies_by_hash_v2: true, get_payload_bodies_by_range_v1: true, - get_payload_bodies_by_range_v2: true, get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, diff --git a/beacon_node/http_api/Cargo.toml 
b/beacon_node/http_api/Cargo.toml index f3779f0e4ac..638fe0f2192 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -20,7 +20,7 @@ lighthouse_network = { workspace = true } eth1 = { workspace = true } state_processing = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } warp_utils = { workspace = true } slot_clock = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index 05a6735b327..c2ccb6695eb 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -11,11 +11,9 @@ pub fn build_block_contents( BeaconBlockResponseWrapper::Blinded(block) => { Ok(ProduceBlockV3Response::Blinded(block.block)) } - BeaconBlockResponseWrapper::Full(block) => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => Ok( - ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), - ), - ForkName::Deneb | ForkName::Electra => { + + BeaconBlockResponseWrapper::Full(block) => { + if fork_name.deneb_enabled() { let BeaconBlockResponse { block, state: _, @@ -37,7 +35,11 @@ pub fn build_block_contents( blobs, }), )) + } else { + Ok(ProduceBlockV3Response::Full(FullBlockContents::Block( + block.block, + ))) } - }, + } } } diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs index 54f2c0efa8d..40b38157365 100644 --- a/beacon_node/http_api/src/builder_states.rs +++ b/beacon_node/http_api/src/builder_states.rs @@ -4,7 +4,7 @@ use safe_arith::SafeArith; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::state_advance::partial_state_advance; use std::sync::Arc; -use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; +use types::{BeaconState, EthSpec, 
Slot, Withdrawals}; const MAX_EPOCH_LOOKAHEAD: u64 = 2; @@ -53,7 +53,8 @@ fn get_next_withdrawals_sanity_checks( } let fork = chain.spec.fork_name_at_slot::(proposal_slot); - if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = fork { + + if !fork.capella_enabled() { return Err(warp_utils::reject::custom_bad_request( "the specified state is a pre-capella state.".to_string(), )); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ffcfda46803..307584b82d4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -146,7 +146,6 @@ pub struct Config { pub listen_port: u16, pub allow_origin: Option, pub tls_config: Option, - pub spec_fork_name: Option, pub data_dir: PathBuf, pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, @@ -164,7 +163,6 @@ impl Default for Config { listen_port: 5052, allow_origin: None, tls_config: None, - spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), sse_capacity_multiplier: 1, enable_beacon_processor: true, @@ -2643,7 +2641,6 @@ pub fn serve( ); // GET config/spec - let spec_fork_name = ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) @@ -2653,7 +2650,7 @@ pub fn serve( move |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); + ConfigAndPreset::from_chain_spec::(&chain.spec, None); Ok(api_types::GenericResponse::from(config_and_preset)) }) }, diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 970eef8dd07..b6a53b26c69 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static HTTP_API_PATHS_TOTAL: LazyLock> = LazyLock::new(|| { diff --git 
a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 16364b435a9..fceeb2dd231 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -389,18 +389,17 @@ pub async fn publish_block>( .count() > 0 { - let custody_columns_indices = &network_globals.custody_columns; - - let custody_columns = gossip_verified_data_columns + let sampling_columns_indices = &network_globals.sampling_columns; + let sampling_columns = gossip_verified_data_columns .into_iter() .flatten() - .filter(|data_column| custody_columns_indices.contains(&data_column.index())) + .filter(|data_column| sampling_columns_indices.contains(&data_column.index())) .collect(); // Importing the columns could trigger block import and network publication in the case // where the block was already seen on gossip. if let Err(e) = - Box::pin(chain.process_gossip_data_columns(custody_columns, publish_fn)).await + Box::pin(chain.process_gossip_data_columns(sampling_columns, publish_fn)).await { let msg = format!("Invalid data column: {e}"); return if let BroadcastValidation::Gossip = validation_level { diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index f835d13fb66..97ba72a2ac6 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -14,7 +14,7 @@ beacon_chain = { workspace = true } store = { workspace = true } lighthouse_network = { workspace = true } slot_clock = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lighthouse_version = { workspace = true } warp_utils = { workspace = true } malloc_utils = { workspace = true } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index d68efff4320..d751c51e4c9 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,8 +1,8 @@ use crate::Context; use 
beacon_chain::BeaconChainTypes; -use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; +use metrics::TextEncoder; pub fn gather_prometheus_metrics( ctx: &Context, @@ -17,13 +17,13 @@ pub fn gather_prometheus_metrics( // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). // - // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton + // The `metrics` crate has a `DEFAULT_REGISTRY` global singleton // which keeps the state of all the metrics. Dynamically updated things will already be // up-to-date in the registry (because they update themselves) however statically updated // things need to be "scraped". // // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then, - // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into + // using `metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. 
if let Some(beacon_chain) = ctx.chain.as_ref() { @@ -48,7 +48,7 @@ pub fn gather_prometheus_metrics( } encoder - .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = ctx.gossipsub_registry.as_ref() { diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index b0f5b9a5e1c..c4fad997025 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -21,7 +21,7 @@ futures = { workspace = true } error-chain = { workspace = true } dirs = { workspace = true } fnv = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } smallvec = { workspace = true } tokio-io-timeout = "1" lru = { workspace = true } diff --git a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md index 006eb20a70f..aba85f61842 100644 --- a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md +++ b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md @@ -2,6 +2,9 @@ - Remove the beta tag from the v1.2 upgrade. See [PR 6344](https://github.com/sigp/lighthouse/pull/6344) +- Correct state inconsistencies with the mesh and connected peers due to the fanout mapping. + See [PR 6244](https://github.com/sigp/lighthouse/pull/6244) + - Implement IDONTWANT messages as per [spec](https://github.com/libp2p/specs/pull/548). 
See [PR 5422](https://github.com/sigp/lighthouse/pull/5422) diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 996f701e890..88fe48c4414 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -764,7 +764,7 @@ where } } else { tracing::error!(peer_id = %peer_id, - "Could not PUBLISH, peer doesn't exist in connected peer list"); + "Could not send PUBLISH, peer doesn't exist in connected peer list"); } } @@ -776,6 +776,11 @@ where return Err(PublishError::AllQueuesFull(recipient_peers.len())); } + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, raw_message.source.as_ref()); + } + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { @@ -1066,7 +1071,7 @@ where }); } else { tracing::error!(peer = %peer_id, - "Could not GRAFT, peer doesn't exist in connected peer list"); + "Could not send GRAFT, peer doesn't exist in connected peer list"); } // If the peer did not previously exist in any mesh, inform the handler @@ -1165,7 +1170,7 @@ where peer.sender.prune(prune); } else { tracing::error!(peer = %peer_id, - "Could not PRUNE, peer doesn't exist in connected peer list"); + "Could not send PRUNE, peer doesn't exist in connected peer list"); } // If the peer did not previously exist in any mesh, inform the handler @@ -1344,7 +1349,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not IWANT, peer doesn't exist in connected peer list"); + "Could not send IWANT, peer doesn't exist in connected peer list"); } } tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); @@ -1367,7 +1372,7 @@ where for id in iwant_msgs { // If we have it and the IHAVE count is not above the threshold, - // foward the message. 
+ // forward the message. if let Some((msg, count)) = self .mcache .get_with_iwant_counts(&id, peer_id) @@ -1407,7 +1412,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not IWANT, peer doesn't exist in connected peer list"); + "Could not send IWANT, peer doesn't exist in connected peer list"); } } } @@ -1812,9 +1817,6 @@ where // Calculate the message id on the transformed data. let msg_id = self.config.message_id(&message); - // Broadcast IDONTWANT messages. - self.send_idontwant(&raw_message, &msg_id, propagation_source); - // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. @@ -1830,6 +1832,12 @@ where self.mcache.observe_duplicate(&msg_id, propagation_source); return; } + + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, Some(propagation_source)); + } + tracing::debug!( message=%msg_id, "Put message in duplicate_cache and resolve promises" @@ -2047,8 +2055,11 @@ where } } - // remove unsubscribed peers from the mesh if it exists + // remove unsubscribed peers from the mesh and fanout if they exist there. 
for (peer_id, topic_hash) in unsubscribed_peers { + self.fanout + .get_mut(&topic_hash) + .map(|peers| peers.remove(&peer_id)); self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub); } @@ -2072,7 +2083,7 @@ where } } else { tracing::error!(peer = %propagation_source, - "Could not GRAFT, peer doesn't exist in connected peer list"); + "Could not send GRAFT, peer doesn't exist in connected peer list"); } // Notify the application of the subscriptions @@ -2090,9 +2101,12 @@ where fn apply_iwant_penalties(&mut self) { if let Some((peer_score, ..)) = &mut self.peer_score { for (peer, count) in self.gossip_promises.get_broken_promises() { - peer_score.add_penalty(&peer, count); - if let Some(metrics) = self.metrics.as_mut() { - metrics.register_score_penalty(Penalty::BrokenPromise); + // We do not apply penalties to nodes that have disconnected. + if self.connected_peers.contains_key(&peer) { + peer_score.add_penalty(&peer, count); + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_score_penalty(Penalty::BrokenPromise); + } } } } @@ -2587,7 +2601,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not IHAVE, peer doesn't exist in connected peer list"); + "Could not send IHAVE, peer doesn't exist in connected peer list"); } } } @@ -2673,7 +2687,7 @@ where peer.sender.prune(prune); } else { tracing::error!(peer = %peer_id, - "Could not PRUNE, peer doesn't exist in connected peer list"); + "Could not send PRUNE, peer doesn't exist in connected peer list"); } // inform the handler @@ -2693,7 +2707,7 @@ where &mut self, message: &RawMessage, msg_id: &MessageId, - propagation_source: &PeerId, + propagation_source: Option<&PeerId>, ) { let Some(mesh_peers) = self.mesh.get(&message.topic) else { return; @@ -2704,14 +2718,14 @@ where let recipient_peers = mesh_peers .iter() .chain(iwant_peers.iter()) - .filter(|peer_id| { - *peer_id != propagation_source && Some(*peer_id) != message.source.as_ref() + .filter(|&peer_id| { + 
Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref() }); for peer_id in recipient_peers { let Some(peer) = self.connected_peers.get_mut(peer_id) else { - tracing::error!(peer = %peer_id, - "Could not IDONTWANT, peer doesn't exist in connected peer list"); + // It can be the case that promises to disconnected peers appear here. In this case + // we simply ignore the peer-id. continue; }; @@ -2976,7 +2990,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not SUBSCRIBE, peer doesn't exist in connected peer list"); + "Could not send SUBSCRIBE, peer doesn't exist in connected peer list"); } } @@ -3348,6 +3362,8 @@ where }; if let Some(metrics) = self.metrics.as_mut() { metrics.register_idontwant(message_ids.len()); + let idontwant_size = message_ids.iter().map(|id| id.0.len()).sum(); + metrics.register_idontwant_bytes(idontwant_size); } for message_id in message_ids { peer.dont_send.insert(message_id, Instant::now()); diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 00de3ba2dbc..62f026b568a 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -5266,13 +5266,14 @@ fn sends_idontwant() { let message = RawMessage { source: Some(peers[1]), - data: vec![12], + data: vec![12u8; 1024], sequence_number: Some(0), topic: topic_hashes[0].clone(), signature: None, key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers @@ -5292,6 +5293,48 @@ fn sends_idontwant() { ); } +#[test] +fn doesnt_sends_idontwant_for_lower_message_size() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = 
PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT was sent" + ); +} + /// Test that a node doesn't send IDONTWANT messages to the mesh peers /// that don't run Gossipsub v1.2. #[test] @@ -5316,6 +5359,7 @@ fn doesnt_send_idontwant() { key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers diff --git a/beacon_node/lighthouse_network/gossipsub/src/config.rs b/beacon_node/lighthouse_network/gossipsub/src/config.rs index 1296e614c89..eb8dd432a33 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/config.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/config.rs @@ -98,6 +98,7 @@ pub struct Config { connection_handler_queue_len: usize, connection_handler_publish_duration: Duration, connection_handler_forward_duration: Duration, + idontwant_message_size_threshold: usize, } impl Config { @@ -370,6 +371,16 @@ impl Config { pub fn forward_queue_duration(&self) -> Duration { self.connection_handler_forward_duration } + + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect to the overall + // traffic and CPU load. This acts as a lower bound cutoff for the message size to which + // IDONTWANT won't be sent to peers. 
Only works if the peers support Gossipsub1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message) + // default is 1kB + pub fn idontwant_message_size_threshold(&self) -> usize { + self.idontwant_message_size_threshold + } } impl Default for Config { @@ -440,6 +451,7 @@ impl Default for ConfigBuilder { connection_handler_queue_len: 5000, connection_handler_publish_duration: Duration::from_secs(5), connection_handler_forward_duration: Duration::from_millis(1000), + idontwant_message_size_threshold: 1000, }, invalid_protocol: false, } @@ -825,6 +837,17 @@ impl ConfigBuilder { self } + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect to the overall + // traffic and CPU load. This acts as a lower bound cutoff for the message size to which + // IDONTWANT won't be sent to peers. Only works if the peers support Gossipsub1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message) + // default is 1kB + pub fn idontwant_message_size_threshold(&mut self, size: usize) -> &mut Self { + self.config.idontwant_message_size_threshold = size; + self + } + /// Constructs a [`Config`] from the given configuration and validates the settings. 
pub fn build(&self) -> Result { // check all constraints on config @@ -895,6 +918,10 @@ impl std::fmt::Debug for Config { "published_message_ids_cache_time", &self.published_message_ids_cache_time, ); + let _ = builder.field( + "idontwant_message_size_threhold", + &self.idontwant_message_size_threshold, + ); builder.finish() } } diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index 7e1cdac18ba..a4ac389a748 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -185,6 +185,9 @@ pub(crate) struct Metrics { /// The number of msg_id's we have received in every IDONTWANT control message. idontwant_msgs_ids: Counter, + /// The number of bytes we have received in every IDONTWANT control message. + idontwant_bytes: Counter, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -338,6 +341,16 @@ impl Metrics { metric }; + let idontwant_bytes = { + let metric = Counter::default(); + registry.register( + "idontwant_bytes", + "The total bytes we have received an IDONTWANT control messages", + metric.clone(), + ); + metric + }; + let memcache_misses = { let metric = Counter::default(); registry.register( @@ -390,6 +403,7 @@ impl Metrics { memcache_misses, topic_iwant_msgs, idontwant_msgs, + idontwant_bytes, idontwant_msgs_ids, priority_queue_size, non_priority_queue_size, @@ -589,6 +603,11 @@ impl Metrics { } } + /// Register receiving the total bytes of an IDONTWANT control message. + pub(crate) fn register_idontwant_bytes(&mut self, bytes: usize) { + self.idontwant_bytes.inc_by(bytes as u64); + } + /// Register receiving an IDONTWANT msg for this topic. 
pub(crate) fn register_idontwant(&mut self, msgs: usize) { self.idontwant_msgs.inc(); diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 7c95977140e..d70e50b1da3 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -19,6 +19,7 @@ pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; pub const DEFAULT_TCP_PORT: u16 = 9000u16; pub const DEFAULT_DISC_PORT: u16 = 9000u16; pub const DEFAULT_QUIC_PORT: u16 = 9001u16; +pub const DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD: usize = 1000usize; /// The maximum size of gossip messages. pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { @@ -141,6 +142,10 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option, + + /// Configuration for the minimum message size for which IDONTWANT messages are send in the mesh. + /// Lower the value reduces the optimization effect of the IDONTWANT messages. 
+ pub idontwant_message_size_threshold: usize, } impl Config { @@ -352,6 +357,7 @@ impl Default for Config { outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, + idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, } } } @@ -433,6 +439,7 @@ pub fn gossipsub_config( gossipsub_config_params: GossipsubConfigParams, seconds_per_slot: u64, slots_per_epoch: u64, + idontwant_message_size_threshold: usize, ) -> gossipsub::Config { fn prefix( prefix: [u8; 4], @@ -440,28 +447,22 @@ pub fn gossipsub_config( fork_context: Arc, ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); - match fork_context.current_fork() { - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { - let topic_len_bytes = topic_bytes.len().to_le_bytes(); - let mut vec = Vec::with_capacity( - prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), - ); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(&topic_len_bytes); - vec.extend_from_slice(topic_bytes); - vec.extend_from_slice(&message.data); - vec - } - ForkName::Base => { - let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(&message.data); - vec - } + + if fork_context.current_fork().altair_enabled() { + let topic_len_bytes = topic_bytes.len().to_le_bytes(); + let mut vec = Vec::with_capacity( + prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), + ); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&topic_len_bytes); + vec.extend_from_slice(topic_bytes); + vec.extend_from_slice(&message.data); + vec + } else { + let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&message.data); + vec } } let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; @@ -504,6 +505,7 @@ pub 
fn gossipsub_config( .duplicate_cache_time(duplicate_cache_time) .message_id_fn(gossip_message_id) .allow_self_origin(true) + .idontwant_message_size_threshold(idontwant_message_size_threshold) .build() .expect("valid gossipsub configuration") } diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 5c12290b970..ced803add80 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -122,6 +122,6 @@ pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; -pub use service::api_types::{PeerRequestId, Request, Response}; +pub use service::api_types::{PeerRequestId, Response}; pub use service::utils::*; pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index c3f64a5a1f4..15445c7d645 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static NAT_OPEN: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 9f46f5daa09..c1e72d250ff 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2,7 +2,7 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; -use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; +use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; use 
delay_map::HashSetDelay; @@ -338,15 +338,15 @@ impl PeerManager { { // This should be updated with the peer dialing. In fact created once the peer is // dialed + let peer_id = enr.peer_id(); if let Some(min_ttl) = min_ttl { self.network_globals .peers .write() - .update_min_ttl(&enr.peer_id(), min_ttl); + .update_min_ttl(&peer_id, min_ttl); } - let peer_id = enr.peer_id(); if self.dial_peer(enr) { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + debug!(self.log, "Added discovered ENR peer to dial queue"; "peer_id" => %peer_id); to_dial_peers += 1; } } @@ -447,18 +447,6 @@ impl PeerManager { self.network_globals.peers.read().is_connected(peer_id) } - /// Reports whether the peer limit is reached in which case we stop allowing new incoming - /// connections. - pub fn peer_limit_reached(&self, count_dialing: bool) -> bool { - if count_dialing { - // This is an incoming connection so limit by the standard max peers - self.network_globals.connected_or_dialing_peers() >= self.max_peers() - } else { - // We dialed this peer, allow up to max_outbound_dialing_peers - self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() - } - } - /// Updates `PeerInfo` with `identify` information. pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { @@ -526,8 +514,8 @@ impl PeerManager { PeerAction::HighToleranceError } RPCError::ErrorResponse(code, _) => match code { - RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, - RPCResponseErrorCode::ResourceUnavailable => { + RpcErrorResponse::Unknown => PeerAction::HighToleranceError, + RpcErrorResponse::ResourceUnavailable => { // Don't ban on this because we want to retry with a block by root request. 
if matches!( protocol, @@ -558,9 +546,9 @@ impl PeerManager { ConnectionDirection::Incoming => return, } } - RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, - RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, - RPCResponseErrorCode::RateLimited => match protocol { + RpcErrorResponse::ServerError => PeerAction::MidToleranceError, + RpcErrorResponse::InvalidRequest => PeerAction::LowToleranceError, + RpcErrorResponse::RateLimited => match protocol { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, @@ -570,6 +558,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, @@ -577,7 +566,7 @@ impl PeerManager { Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, }, - RPCResponseErrorCode::BlobsNotFoundForBlock => PeerAction::LowToleranceError, + RpcErrorResponse::BlobsNotFoundForBlock => PeerAction::LowToleranceError, }, RPCError::SSZDecodeError(_) => PeerAction::Fatal, RPCError::UnsupportedProtocol => { @@ -597,6 +586,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::MetaData => PeerAction::Fatal, Protocol::Status => PeerAction::Fatal, } @@ -618,6 +608,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, 
Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index b7fd5b5e5d7..c40f78b4b08 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -15,7 +15,6 @@ use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; -use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; @@ -94,26 +93,20 @@ impl NetworkBehaviour for PeerManager { } if let Some(enr) = self.peers_to_dial.pop() { - let peer_id = enr.peer_id(); - self.inject_peer_connection(&peer_id, ConnectingType::Dialing, Some(enr.clone())); - - let quic_multiaddrs = if self.quic_enabled { - let quic_multiaddrs = enr.multiaddr_quic(); - if !quic_multiaddrs.is_empty() { - debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs); - } - quic_multiaddrs - } else { - Vec::new() - }; + self.inject_peer_connection(&enr.peer_id(), ConnectingType::Dialing, Some(enr.clone())); // Prioritize Quic connections over Tcp ones. - let multiaddrs = quic_multiaddrs - .into_iter() - .chain(enr.multiaddr_tcp()) - .collect(); + let multiaddrs = [ + self.quic_enabled + .then_some(enr.multiaddr_quic()) + .unwrap_or_default(), + enr.multiaddr_tcp(), + ] + .concat(); + + debug!(self.log, "Dialing peer"; "peer_id"=> %enr.peer_id(), "multiaddrs" => ?multiaddrs); return Poll::Ready(ToSwarm::Dial { - opts: DialOpts::peer_id(peer_id) + opts: DialOpts::peer_id(enr.peer_id()) .condition(PeerCondition::Disconnected) .addresses(multiaddrs) .build(), @@ -130,14 +123,7 @@ impl NetworkBehaviour for PeerManager { endpoint, other_established, .. 
- }) => { - // NOTE: We still need to handle the [`ConnectionEstablished`] because the - // [`NetworkBehaviour::handle_established_inbound_connection`] and - // [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This - // means another behaviour can kill the connection early, and we can't assume a - // peer as connected until this event is received. - self.on_connection_established(peer_id, endpoint, other_established) - } + }) => self.on_connection_established(peer_id, endpoint, other_established), FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, endpoint, @@ -206,6 +192,21 @@ impl NetworkBehaviour for PeerManager { "Connection to peer rejected: peer has a bad score", )); } + + // Check the connection limits + if self.network_globals.connected_or_dialing_peers() >= self.max_peers() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + return Err(ConnectionDenied::new( + "Connection to peer rejected: too many connections", + )); + } + Ok(ConnectionHandler) } @@ -218,13 +219,26 @@ impl NetworkBehaviour for PeerManager { _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); - match self.ban_status(&peer_id) { - Some(cause) => { - error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); - Err(ConnectionDenied::new(cause)) - } - None => Ok(ConnectionHandler), + if let Some(cause) = self.ban_status(&peer_id) { + error!(self.log, "Connected a banned peer. 
Rejecting connection"; "peer_id" => %peer_id); + return Err(ConnectionDenied::new(cause)); } + + // Check the connection limits + if self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + return Err(ConnectionDenied::new( + "Connection to peer rejected: too many connections", + )); + } + + Ok(ConnectionHandler) } } @@ -233,7 +247,7 @@ impl PeerManager { &mut self, peer_id: PeerId, endpoint: &ConnectedPoint, - other_established: usize, + _other_established: usize, ) { debug!(self.log, "Connection established"; "peer_id" => %peer_id, "multiaddr" => %endpoint.get_remote_address(), @@ -247,26 +261,6 @@ impl PeerManager { self.update_peer_count_metrics(); } - // Count dialing peers in the limit if the peer dialed us. - let count_dialing = endpoint.is_listener(); - // Check the connection limits - if self.peer_limit_reached(count_dialing) - && self - .network_globals - .peers - .read() - .peer_info(&peer_id) - .map_or(true, |peer| !peer.has_future_duty()) - { - // Gracefully disconnect the peer. - self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); - return; - } - - if other_established == 0 { - self.events.push(PeerManagerEvent::MetaData(peer_id)); - } - // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. 
match endpoint { diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 224fb8a5f71..9bdecab70b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -2,7 +2,7 @@ use crate::rpc::methods::*; use crate::rpc::protocol::{ Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN, }; -use crate::rpc::{InboundRequest, OutboundRequest}; +use crate::rpc::RequestType; use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; @@ -18,9 +18,9 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, - SignedBeaconBlockElectra, + LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -61,23 +61,24 @@ impl SSZSnappyInboundCodec { /// Encodes RPC Responses sent to peers. 
fn encode_response( &mut self, - item: RPCCodedResponse, + item: RpcResponse, dst: &mut BytesMut, ) -> Result<(), RPCError> { let bytes = match &item { - RPCCodedResponse::Success(resp) => match &resp { - RPCResponse::Status(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), - RPCResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), - RPCResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), - RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), - RPCResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), - RPCResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), - RPCResponse::Pong(res) => res.data.as_ssz_bytes(), - RPCResponse::MetaData(res) => + RpcResponse::Success(resp) => match &resp { + RpcSuccessResponse::Status(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlocksByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlobsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientUpdatesByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::Pong(res) => res.data.as_ssz_bytes(), + RpcSuccessResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. 
{ match self.protocol.versioned_protocol { @@ -92,8 +93,8 @@ impl SSZSnappyInboundCodec { } } }, - RPCCodedResponse::Error(_, err) => err.as_ssz_bytes(), - RPCCodedResponse::StreamTermination(_) => { + RpcResponse::Error(_, err) => err.as_ssz_bytes(), + RpcResponse::StreamTermination(_) => { unreachable!("Code error - attempting to encode a stream termination") } }; @@ -126,10 +127,10 @@ impl SSZSnappyInboundCodec { } // Encoder for inbound streams: Encodes RPC Responses sent to peers. -impl Encoder> for SSZSnappyInboundCodec { +impl Encoder> for SSZSnappyInboundCodec { type Error = RPCError; - fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RpcResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { dst.clear(); dst.reserve(1); dst.put_u8( @@ -142,18 +143,18 @@ impl Encoder> for SSZSnappyInboundCodec { // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { - type Item = InboundRequest; + type Item = RequestType; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV1 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v1()))); } if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v2()))); } if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV3 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v3()))); } let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { return Ok(None); @@ -231,7 +232,10 @@ impl SSZSnappyOutboundCodec { } // Decode an Rpc response. 
- fn decode_response(&mut self, src: &mut BytesMut) -> Result>, RPCError> { + fn decode_response( + &mut self, + src: &mut BytesMut, + ) -> Result>, RPCError> { // Read the context bytes if required if self.protocol.has_context_bytes() && self.fork_name.is_none() { if src.len() >= CONTEXT_BYTES_LEN { @@ -318,28 +322,34 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder> for SSZSnappyOutboundCodec { +impl Encoder> for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: OutboundRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { - OutboundRequest::Status(req) => req.as_ssz_bytes(), - OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), - OutboundRequest::BlocksByRange(r) => match r { + RequestType::Status(req) => req.as_ssz_bytes(), + RequestType::Goodbye(req) => req.as_ssz_bytes(), + RequestType::BlocksByRange(r) => match r { OldBlocksByRangeRequest::V1(req) => req.as_ssz_bytes(), OldBlocksByRangeRequest::V2(req) => req.as_ssz_bytes(), }, - OutboundRequest::BlocksByRoot(r) => match r { + RequestType::BlocksByRoot(r) => match r { BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), }, - OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), - OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), - OutboundRequest::DataColumnsByRange(req) => req.as_ssz_bytes(), - OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), - OutboundRequest::Ping(req) => req.as_ssz_bytes(), - OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + RequestType::BlobsByRange(req) => req.as_ssz_bytes(), + RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), + RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), + 
RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), + RequestType::Ping(req) => req.as_ssz_bytes(), + RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), + RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), + // no metadata to encode + RequestType::MetaData(_) + | RequestType::LightClientOptimisticUpdate + | RequestType::LightClientFinalityUpdate => return Ok(()), }; + // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { return Err(RPCError::InternalError( @@ -369,7 +379,7 @@ impl Encoder> for SSZSnappyOutboundCodec { // We prefer to decode blocks and attestations with extra knowledge about the chain to perform // faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZSnappyOutboundCodec { - type Item = RPCCodedResponse; + type Item = RpcResponse; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -385,15 +395,15 @@ impl Decoder for SSZSnappyOutboundCodec { }); let inner_result = { - if RPCCodedResponse::::is_response(response_code) { + if RpcResponse::::is_response(response_code) { // decode an actual response and mutates the buffer if enough bytes have been read // returning the result. 
self.decode_response(src) - .map(|r| r.map(RPCCodedResponse::Success)) + .map(|r| r.map(RpcResponse::Success)) } else { // decode an error self.decode_error(src) - .map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp))) + .map(|r| r.map(|resp| RpcResponse::from_error(response_code, resp))) } }; // if the inner decoder was capable of decoding a chunk, we need to reset the current @@ -437,14 +447,14 @@ fn handle_error( fn context_bytes( protocol: &ProtocolId, fork_context: &ForkContext, - resp: &RPCCodedResponse, + resp: &RpcResponse, ) -> Option<[u8; CONTEXT_BYTES_LEN]> { // Add the context bytes if required if protocol.has_context_bytes() { - if let RPCCodedResponse::Success(rpc_variant) = resp { + if let RpcResponse::Success(rpc_variant) = resp { match rpc_variant { - RPCResponse::BlocksByRange(ref_box_block) - | RPCResponse::BlocksByRoot(ref_box_block) => { + RpcSuccessResponse::BlocksByRange(ref_box_block) + | RpcSuccessResponse::BlocksByRoot(ref_box_block) => { return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! @@ -468,10 +478,11 @@ fn context_bytes( } }; } - RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) => { + RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => { return fork_context.to_context_bytes(ForkName::Deneb); } - RPCResponse::DataColumnsByRoot(d) | RPCResponse::DataColumnsByRange(d) => { + RpcSuccessResponse::DataColumnsByRoot(d) + | RpcSuccessResponse::DataColumnsByRange(d) => { // TODO(das): Remove deneb fork after `peerdas-devnet-2`. 
return if matches!( fork_context.spec.fork_name_at_slot::(d.slot()), @@ -482,20 +493,26 @@ fn context_bytes( fork_context.to_context_bytes(ForkName::Electra) }; } - RPCResponse::LightClientBootstrap(lc_bootstrap) => { + RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { return lc_bootstrap .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } - RPCResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { + RpcSuccessResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { return lc_optimistic_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } - RPCResponse::LightClientFinalityUpdate(lc_finality_update) => { + RpcSuccessResponse::LightClientFinalityUpdate(lc_finality_update) => { return lc_finality_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } + RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => { + return lc_update + .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); + } // These will not pass the has_context_bytes() check - RPCResponse::Status(_) | RPCResponse::Pong(_) | RPCResponse::MetaData(_) => { + RpcSuccessResponse::Status(_) + | RpcSuccessResponse::Pong(_) + | RpcSuccessResponse::MetaData(_) => { return None; } } @@ -536,21 +553,21 @@ fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], spec: &ChainSpec, -) -> Result>, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status( + SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::GoodbyeV1 => Ok(Some(InboundRequest::Goodbye( + SupportedProtocol::GoodbyeV1 => Ok(Some(RequestType::Goodbye( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::BlocksByRangeV2 => Ok(Some(InboundRequest::BlocksByRange( + SupportedProtocol::BlocksByRangeV2 => 
Ok(Some(RequestType::BlocksByRange( OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2::from_ssz_bytes(decoded_buffer)?), ))), - SupportedProtocol::BlocksByRangeV1 => Ok(Some(InboundRequest::BlocksByRange( + SupportedProtocol::BlocksByRangeV1 => Ok(Some(RequestType::BlocksByRange( OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1::from_ssz_bytes(decoded_buffer)?), ))), - SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot( + SupportedProtocol::BlocksByRootV2 => Ok(Some(RequestType::BlocksByRoot( BlocksByRootRequest::V2(BlocksByRootRequestV2 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -558,7 +575,7 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot( + SupportedProtocol::BlocksByRootV1 => Ok(Some(RequestType::BlocksByRoot( BlocksByRootRequest::V1(BlocksByRootRequestV1 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -566,21 +583,21 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange( + SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), SupportedProtocol::BlobsByRootV1 => { - Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest { + Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, spec.max_request_blob_sidecars as usize, )?, }))) } - SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(InboundRequest::DataColumnsByRange( + SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(RequestType::DataColumnsByRange( DataColumnsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::DataColumnsByRootV1 => Ok(Some(InboundRequest::DataColumnsByRoot( + SupportedProtocol::DataColumnsByRootV1 => Ok(Some(RequestType::DataColumnsByRoot( DataColumnsByRootRequest { data_column_ids: RuntimeVariableList::from_ssz_bytes( 
decoded_buffer, @@ -588,19 +605,24 @@ fn handle_rpc_request( )?, }, ))), - SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { + SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::LightClientBootstrapV1 => Ok(Some( - InboundRequest::LightClientBootstrap(LightClientBootstrapRequest { + SupportedProtocol::LightClientBootstrapV1 => Ok(Some(RequestType::LightClientBootstrap( + LightClientBootstrapRequest { root: Hash256::from_ssz_bytes(decoded_buffer)?, - }), - )), + }, + ))), SupportedProtocol::LightClientOptimisticUpdateV1 => { - Ok(Some(InboundRequest::LightClientOptimisticUpdate)) + Ok(Some(RequestType::LightClientOptimisticUpdate)) } SupportedProtocol::LightClientFinalityUpdateV1 => { - Ok(Some(InboundRequest::LightClientFinalityUpdate)) + Ok(Some(RequestType::LightClientFinalityUpdate)) + } + SupportedProtocol::LightClientUpdatesByRangeV1 => { + Ok(Some(RequestType::LightClientUpdatesByRange( + LightClientUpdatesByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) } // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. 
@@ -610,7 +632,7 @@ fn handle_rpc_request( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v3()))) } } SupportedProtocol::MetaDataV2 => { @@ -619,14 +641,14 @@ fn handle_rpc_request( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v2()))) } } SupportedProtocol::MetaDataV1 => { if !decoded_buffer.is_empty() { Err(RPCError::InvalidData("Metadata request".to_string())) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v1()))) } } } @@ -642,31 +664,33 @@ fn handle_rpc_response( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], fork_name: Option, -) -> Result>, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(RPCResponse::Status( + SupportedProtocol::StatusV1 => Ok(Some(RpcSuccessResponse::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, ))), // This case should be unreachable as `Goodbye` has no response. 
SupportedProtocol::GoodbyeV1 => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - SupportedProtocol::BlocksByRangeV1 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SupportedProtocol::BlocksByRangeV1 => { + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))) + } + SupportedProtocol::BlocksByRootV1 => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlobsByRangeV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( BlobSidecar::from_ssz_bytes(decoded_buffer)?, )))), Some(_) => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by range".to_string(), )), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -674,15 +698,15 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlobsByRootV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( BlobSidecar::from_ssz_bytes(decoded_buffer)?, )))), Some(_) => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by root".to_string(), )), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} 
response", versioned_protocol @@ -695,18 +719,18 @@ fn handle_rpc_response( // does not advertise the topic on deneb, simply allows it to decode it. Advertise // logic is in `SupportedTopic::currently_supported`. if fork_name.deneb_enabled() { - Ok(Some(RPCResponse::DataColumnsByRoot(Arc::new( + Ok(Some(RpcSuccessResponse::DataColumnsByRoot(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) } else { Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for data columns by root".to_string(), )) } } None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -716,36 +740,36 @@ fn handle_rpc_response( SupportedProtocol::DataColumnsByRangeV1 => match fork_name { Some(fork_name) => { if fork_name.deneb_enabled() { - Ok(Some(RPCResponse::DataColumnsByRange(Arc::new( + Ok(Some(RpcSuccessResponse::DataColumnsByRange(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) } else { Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for data columns by range".to_string(), )) } } None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol ), )), }, - SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { + SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::MetaDataV1 => Ok(Some(RPCResponse::MetaData(MetaData::V1( + SupportedProtocol::MetaDataV1 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, )))), SupportedProtocol::LightClientBootstrapV1 => match fork_name { - Some(fork_name) => 
Ok(Some(RPCResponse::LightClientBootstrap(Arc::new( + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientBootstrap(Arc::new( LightClientBootstrap::from_ssz_bytes(decoded_buffer, fork_name)?, )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -753,11 +777,14 @@ fn handle_rpc_response( )), }, SupportedProtocol::LightClientOptimisticUpdateV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientOptimisticUpdate(Arc::new( - LightClientOptimisticUpdate::from_ssz_bytes(decoded_buffer, fork_name)?, - )))), + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientOptimisticUpdate( + Arc::new(LightClientOptimisticUpdate::from_ssz_bytes( + decoded_buffer, + fork_name, + )?), + ))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -765,11 +792,29 @@ fn handle_rpc_response( )), }, SupportedProtocol::LightClientFinalityUpdateV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientFinalityUpdate(Arc::new( - LightClientFinalityUpdate::from_ssz_bytes(decoded_buffer, fork_name)?, - )))), + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientFinalityUpdate( + Arc::new(LightClientFinalityUpdate::from_ssz_bytes( + decoded_buffer, + fork_name, + )?), + ))), + None => Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::LightClientUpdatesByRangeV1 => match fork_name { + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientUpdatesByRange( + Arc::new(LightClientUpdate::from_ssz_bytes( + decoded_buffer, + &fork_name, + )?), + ))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + 
RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -777,40 +822,40 @@ fn handle_rpc_response( )), }, // MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses - SupportedProtocol::MetaDataV3 => Ok(Some(RPCResponse::MetaData(MetaData::V3( + SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V3( MetaDataV3::from_ssz_bytes(decoded_buffer)?, )))), - SupportedProtocol::MetaDataV2 => Ok(Some(RPCResponse::MetaData(MetaData::V2( + SupportedProtocol::MetaDataV2 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V2( MetaDataV2::from_ssz_bytes(decoded_buffer)?, )))), SupportedProtocol::BlocksByRangeV2 => match fork_name { - Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Base) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Bellatrix) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Capella) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Electra) => 
Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Electra) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Electra(SignedBeaconBlockElectra::from_ssz_bytes( decoded_buffer, )?), )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -818,32 +863,32 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlocksByRootV2 => match fork_name { - Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Base) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Bellatrix) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Capella) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Electra) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Electra) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Electra(SignedBeaconBlockElectra::from_ssz_bytes( decoded_buffer, )?), )))), None => 
Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -864,7 +909,7 @@ fn context_bytes_to_fork_name( .ok_or_else(|| { let encoded = hex::encode(context_bytes); RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "Context bytes {} do not correspond to a valid fork", encoded @@ -1063,7 +1108,7 @@ mod tests { /// Encodes the given protocol response as bytes. fn encode_response( protocol: SupportedProtocol, - message: RPCCodedResponse, + message: RpcResponse, fork_name: ForkName, spec: &ChainSpec, ) -> Result { @@ -1113,7 +1158,7 @@ mod tests { message: &mut BytesMut, fork_name: ForkName, spec: &ChainSpec, - ) -> Result>, RPCError> { + ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); @@ -1126,20 +1171,16 @@ mod tests { /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. fn encode_then_decode_response( protocol: SupportedProtocol, - message: RPCCodedResponse, + message: RpcResponse, fork_name: ForkName, spec: &ChainSpec, - ) -> Result>, RPCError> { + ) -> Result>, RPCError> { let mut encoded = encode_response(protocol, message, fork_name, spec)?; decode_response(protocol, &mut encoded, fork_name, spec) } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
- fn encode_then_decode_request( - req: OutboundRequest, - fork_name: ForkName, - spec: &ChainSpec, - ) { + fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); @@ -1162,35 +1203,48 @@ mod tests { ) }); match req { - OutboundRequest::Status(status) => { - assert_eq!(decoded, InboundRequest::Status(status)) + RequestType::Status(status) => { + assert_eq!(decoded, RequestType::Status(status)) + } + RequestType::Goodbye(goodbye) => { + assert_eq!(decoded, RequestType::Goodbye(goodbye)) } - OutboundRequest::Goodbye(goodbye) => { - assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) + RequestType::BlocksByRange(bbrange) => { + assert_eq!(decoded, RequestType::BlocksByRange(bbrange)) } - OutboundRequest::BlocksByRange(bbrange) => { - assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) + RequestType::BlocksByRoot(bbroot) => { + assert_eq!(decoded, RequestType::BlocksByRoot(bbroot)) } - OutboundRequest::BlocksByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) + RequestType::BlobsByRange(blbrange) => { + assert_eq!(decoded, RequestType::BlobsByRange(blbrange)) } - OutboundRequest::BlobsByRange(blbrange) => { - assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + RequestType::BlobsByRoot(bbroot) => { + assert_eq!(decoded, RequestType::BlobsByRoot(bbroot)) } - OutboundRequest::BlobsByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) + RequestType::DataColumnsByRoot(dcbroot) => { + assert_eq!(decoded, RequestType::DataColumnsByRoot(dcbroot)) } - OutboundRequest::DataColumnsByRoot(dcbroot) => { - assert_eq!(decoded, InboundRequest::DataColumnsByRoot(dcbroot)) + RequestType::DataColumnsByRange(dcbrange) => { + assert_eq!(decoded, 
RequestType::DataColumnsByRange(dcbrange)) } - OutboundRequest::DataColumnsByRange(dcbrange) => { - assert_eq!(decoded, InboundRequest::DataColumnsByRange(dcbrange)) + RequestType::Ping(ping) => { + assert_eq!(decoded, RequestType::Ping(ping)) } - OutboundRequest::Ping(ping) => { - assert_eq!(decoded, InboundRequest::Ping(ping)) + RequestType::MetaData(metadata) => { + assert_eq!(decoded, RequestType::MetaData(metadata)) + } + RequestType::LightClientBootstrap(light_client_bootstrap_request) => { + assert_eq!( + decoded, + RequestType::LightClientBootstrap(light_client_bootstrap_request) + ) } - OutboundRequest::MetaData(metadata) => { - assert_eq!(decoded, InboundRequest::MetaData(metadata)) + RequestType::LightClientOptimisticUpdate | RequestType::LightClientFinalityUpdate => {} + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) => { + assert_eq!( + decoded, + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) + ) } } } @@ -1203,31 +1257,33 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::StatusV1, - RPCCodedResponse::Success(RPCResponse::Status(status_message())), + RpcResponse::Success(RpcSuccessResponse::Status(status_message())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::Status(status_message()))) + Ok(Some(RpcSuccessResponse::Status(status_message()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::PingV1, - RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), + RpcResponse::Success(RpcSuccessResponse::Pong(ping_message())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::Pong(ping_message()))) + Ok(Some(RpcSuccessResponse::Pong(ping_message()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - 
Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1236,7 +1292,9 @@ mod tests { matches!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + altair_block() + ))), ForkName::Altair, &chain_spec, ) @@ -1249,20 +1307,24 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))) ); assert!( matches!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot( + Arc::new(altair_block()) + )), ForkName::Altair, &chain_spec, ) @@ -1275,65 +1337,65 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata()))), ); // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1 assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v2())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata()))), + 
Ok(Some(RpcSuccessResponse::MetaData(metadata()))), ); // A MetaDataV3 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v3())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v3())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RPCCodedResponse::Success(RPCResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, - RPCCodedResponse::Success(RPCResponse::DataColumnsByRange( + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( empty_data_column_sidecar() )), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::DataColumnsByRange( + Ok(Some(RpcSuccessResponse::DataColumnsByRange( empty_data_column_sidecar() ))), ); @@ -1341,13 +1403,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, - RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot( + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( empty_data_column_sidecar() )), ForkName::Deneb, &chain_spec ), - 
Ok(Some(RPCResponse::DataColumnsByRoot( + Ok(Some(RpcSuccessResponse::DataColumnsByRoot( empty_data_column_sidecar() ))), ); @@ -1361,11 +1423,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1376,11 +1440,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1388,11 +1454,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + altair_block() + )))) ); let bellatrix_block_small = @@ -1403,13 +1471,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( bellatrix_block_small.clone() ))), ForkName::Bellatrix, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( bellatrix_block_small.clone() )))) ); @@ -1435,13 
+1503,15 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))), + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))), ); // Decode the smallest possible base block when current fork is altair @@ -1450,35 +1520,39 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + altair_block() + )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( bellatrix_block_small.clone() ))), ForkName::Bellatrix, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( bellatrix_block_small )))) ); @@ -1505,21 +1579,21 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - 
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))) + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v2())), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))) + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); } @@ -1533,7 +1607,9 @@ mod tests { // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block(), + ))), ForkName::Base, &chain_spec, ) @@ -1549,12 +1625,14 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Base, &chain_spec, ) @@ -1570,13 +1648,15 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); // Trying to decode a base block with altair context bytes should give ssz decoding error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + 
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1601,7 +1681,7 @@ mod tests { // Trying to decode an altair block with base context bytes should give ssz decoding error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ) @@ -1628,7 +1708,7 @@ mod tests { encoded_bytes.extend_from_slice( &encode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Altair, &chain_spec, ) @@ -1646,7 +1726,9 @@ mod tests { // Sending context bytes which do not correspond to any fork should return an error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1664,13 +1746,15 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1693,20 +1777,20 @@ mod tests { fn test_encode_then_decode_request() { let chain_spec = Spec::default_spec(); - let requests: &[OutboundRequest] = &[ - OutboundRequest::Ping(ping_message()), 
- OutboundRequest::Status(status_message()), - OutboundRequest::Goodbye(GoodbyeReason::Fault), - OutboundRequest::BlocksByRange(bbrange_request_v1()), - OutboundRequest::BlocksByRange(bbrange_request_v2()), - OutboundRequest::BlocksByRoot(bbroot_request_v1(&chain_spec)), - OutboundRequest::BlocksByRoot(bbroot_request_v2(&chain_spec)), - OutboundRequest::MetaData(MetadataRequest::new_v1()), - OutboundRequest::BlobsByRange(blbrange_request()), - OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), - OutboundRequest::DataColumnsByRange(dcbrange_request()), - OutboundRequest::DataColumnsByRoot(dcbroot_request(&chain_spec)), - OutboundRequest::MetaData(MetadataRequest::new_v2()), + let requests: &[RequestType] = &[ + RequestType::Ping(ping_message()), + RequestType::Status(status_message()), + RequestType::Goodbye(GoodbyeReason::Fault), + RequestType::BlocksByRange(bbrange_request_v1()), + RequestType::BlocksByRange(bbrange_request_v2()), + RequestType::BlocksByRoot(bbroot_request_v1(&chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v2(&chain_spec)), + RequestType::MetaData(MetadataRequest::new_v1()), + RequestType::BlobsByRange(blbrange_request()), + RequestType::BlobsByRoot(blbroot_request(&chain_spec)), + RequestType::DataColumnsByRange(dcbrange_request()), + RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec)), + RequestType::MetaData(MetadataRequest::new_v2()), ]; for req in requests.iter() { diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index fcb9c986048..42ece6dc4ff 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -96,6 +96,7 @@ pub struct RateLimiterConfig { pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, + pub(super) light_client_updates_by_range_quota: Quota, } impl RateLimiterConfig { @@ 
-121,6 +122,7 @@ impl RateLimiterConfig { pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA: Quota = Quota::one_every(10); } impl Default for RateLimiterConfig { @@ -140,6 +142,7 @@ impl Default for RateLimiterConfig { light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, light_client_finality_update_quota: Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA, + light_client_updates_by_range_quota: Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA, } } } @@ -198,6 +201,7 @@ impl FromStr for RateLimiterConfig { let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; + let mut light_client_updates_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; @@ -228,6 +232,10 @@ impl FromStr for RateLimiterConfig { light_client_finality_update_quota = light_client_finality_update_quota.or(quota) } + Protocol::LightClientUpdatesByRange => { + light_client_updates_by_range_quota = + light_client_updates_by_range_quota.or(quota) + } } } Ok(RateLimiterConfig { @@ -252,6 +260,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA), light_client_finality_update_quota: light_client_finality_update_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA), + light_client_updates_by_range_quota: light_client_updates_by_range_quota + .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 6f338ebc8be..e76d6d27866 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -1,11 +1,12 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; +use super::methods::{GoodbyeReason, RpcErrorResponse, RpcResponse}; use super::outbound::OutboundRequestContainer; -use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; -use super::{RPCReceived, RPCSend, ReqId}; -use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; +use super::protocol::{InboundOutput, Protocol, RPCError, RPCProtocol, RequestType}; +use super::RequestId; +use super::{RPCReceived, RPCSend, ReqId, Request}; +use crate::rpc::outbound::OutboundFramed; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; use futures::prelude::*; @@ -95,7 +96,7 @@ where events_out: SmallVec<[HandlerEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[(Id, OutboundRequest); 4]>, + dial_queue: SmallVec<[(Id, RequestType); 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, @@ -159,7 +160,7 @@ struct InboundInfo { /// State of the substream. state: InboundState, /// Responses queued for sending. - pending_items: VecDeque>, + pending_items: VecDeque>, /// Protocol of the original request we received from the peer. protocol: Protocol, /// Responses that the peer is still expecting from us. @@ -205,7 +206,7 @@ pub enum OutboundSubstreamState { /// The framed negotiated substream. substream: Box>, /// Keeps track of the actual request sent. - request: OutboundRequest, + request: RequestType, }, /// Closing an outbound substream> Closing(Box>), @@ -263,7 +264,7 @@ where // Queue our goodbye message. 
if let Some((id, reason)) = goodbye_reason { - self.dial_queue.push((id, OutboundRequest::Goodbye(reason))); + self.dial_queue.push((id, RequestType::Goodbye(reason))); } self.state = HandlerState::ShuttingDown(Box::pin(sleep(Duration::from_secs( @@ -273,7 +274,7 @@ where } /// Opens an outbound substream with a request. - fn send_request(&mut self, id: Id, req: OutboundRequest) { + fn send_request(&mut self, id: Id, req: RequestType) { match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); @@ -291,10 +292,10 @@ where /// Sends a response to a peer's request. // NOTE: If the substream has closed due to inactivity, or the substream is in the // wrong state a response will fail silently. - fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse) { + fn send_response(&mut self, inbound_id: SubstreamId, response: RpcResponse) { // check if the stream matching the response still exists let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else { - if !matches!(response, RPCCodedResponse::StreamTermination(..)) { + if !matches!(response, RpcResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(self.log, "Inbound stream has expired. 
Response not sent"; "response" => %response, "id" => inbound_id); @@ -302,7 +303,7 @@ where return; }; // If the response we are sending is an error, report back for handling - if let RPCCodedResponse::Error(ref code, ref reason) = response { + if let RpcResponse::Error(ref code, ref reason) = response { self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::ErrorResponse(*code, reason.to_string()), proto: inbound_info.protocol, @@ -329,7 +330,7 @@ where type ToBehaviour = HandlerEvent; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; - type OutboundOpenInfo = (Id, OutboundRequest); // Keep track of the id and the request + type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -403,8 +404,8 @@ where if info.pending_items.back().map(|l| l.close_after()) == Some(false) { // if the last chunk does not close the stream, append an error - info.pending_items.push_back(RPCCodedResponse::Error( - RPCResponseErrorCode::ServerError, + info.pending_items.push_back(RpcResponse::Error( + RpcErrorResponse::ServerError, "Request timed out".into(), )); } @@ -672,13 +673,13 @@ where let proto = entry.get().proto; let received = match response { - RPCCodedResponse::StreamTermination(t) => { + RpcResponse::StreamTermination(t) => { HandlerEvent::Ok(RPCReceived::EndOfStream(id, t)) } - RPCCodedResponse::Success(resp) => { + RpcResponse::Success(resp) => { HandlerEvent::Ok(RPCReceived::Response(id, resp)) } - RPCCodedResponse::Error(ref code, ref r) => { + RpcResponse::Error(ref code, ref r) => { HandlerEvent::Err(HandlerErr::Outbound { id, proto, @@ -888,21 +889,23 @@ where } // If we received a goodbye, shutdown the connection. 
- if let InboundRequest::Goodbye(_) = req { + if let RequestType::Goodbye(_) = req { self.shutdown(None); } - self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( - self.current_inbound_substream_id, - req, - ))); + self.events_out + .push(HandlerEvent::Ok(RPCReceived::Request(Request { + id: RequestId::next(), + substream_id: self.current_inbound_substream_id, + r#type: req, + }))); self.current_inbound_substream_id.0 += 1; } fn on_fully_negotiated_outbound( &mut self, substream: OutboundFramed, - (id, request): (Id, OutboundRequest), + (id, request): (Id, RequestType), ) { self.dial_negotiated -= 1; // Reset any io-retries counter. @@ -958,7 +961,7 @@ where } fn on_dial_upgrade_error( &mut self, - request_info: (Id, OutboundRequest), + request_info: (Id, RequestType), error: StreamUpgradeError, ) { let (id, req) = request_info; @@ -1016,15 +1019,15 @@ impl slog::Value for SubstreamId { /// error that occurred with sending a message is reported also. async fn send_message_to_inbound_substream( mut substream: InboundSubstream, - message: RPCCodedResponse, + message: RpcResponse, last_chunk: bool, ) -> Result<(InboundSubstream, bool), RPCError> { - if matches!(message, RPCCodedResponse::StreamTermination(_)) { + if matches!(message, RpcResponse::StreamTermination(_)) { substream.close().await.map(|_| (substream, true)) } else { // chunks that are not stream terminations get sent, and the stream is closed if // the response is an error - let is_error = matches!(message, RPCCodedResponse::Error(..)); + let is_error = matches!(message, RpcResponse::Error(..)); let send_result = substream.send(message).await; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 6e1ba9cd302..bb8bfb0e206 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -14,10 +14,11 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; 
use types::blob_sidecar::BlobIdentifier; +use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -477,11 +478,39 @@ impl DataColumnsByRootRequest { } } +/// Request a number of beacon data columns from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientUpdatesByRangeRequest { + /// The starting period to request light client updates. + pub start_period: u64, + /// The number of periods from `start_period`. + pub count: u64, +} + +impl LightClientUpdatesByRangeRequest { + pub fn max_requested(&self) -> u64 { + MAX_REQUEST_LIGHT_CLIENT_UPDATES + } + + pub fn ssz_min_len() -> usize { + LightClientUpdatesByRangeRequest { + start_period: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages #[derive(Debug, Clone, PartialEq)] -pub enum RPCResponse { +pub enum RpcSuccessResponse { /// A HELLO message. Status(StatusMessage), @@ -504,6 +533,9 @@ pub enum RPCResponse { /// A response to a get LIGHT_CLIENT_FINALITY_UPDATE request. LightClientFinalityUpdate(Arc>), + /// A response to a get LIGHT_CLIENT_UPDATES_BY_RANGE request. + LightClientUpdatesByRange(Arc>), + /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), @@ -540,16 +572,19 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + + /// Light client updates by range stream termination. 
+ LightClientUpdatesByRange, } /// The structured response containing a result/code indicating success or failure /// and the contents of the response #[derive(Debug, Clone)] -pub enum RPCCodedResponse { +pub enum RpcResponse { /// The response is a successful. - Success(RPCResponse), + Success(RpcSuccessResponse), - Error(RPCResponseErrorCode, ErrorType), + Error(RpcErrorResponse, ErrorType), /// Received a stream termination indicating which response is being terminated. StreamTermination(ResponseTermination), @@ -564,7 +599,7 @@ pub struct LightClientBootstrapRequest { /// The code assigned to an erroneous `RPCResponse`. #[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] -pub enum RPCResponseErrorCode { +pub enum RpcErrorResponse { RateLimited, BlobsNotFoundForBlock, InvalidRequest, @@ -574,13 +609,13 @@ pub enum RPCResponseErrorCode { Unknown, } -impl RPCCodedResponse { +impl RpcResponse { /// Used to encode the response in the codec. pub fn as_u8(&self) -> Option { match self { - RPCCodedResponse::Success(_) => Some(0), - RPCCodedResponse::Error(code, _) => Some(code.as_u8()), - RPCCodedResponse::StreamTermination(_) => None, + RpcResponse::Success(_) => Some(0), + RpcResponse::Error(code, _) => Some(code.as_u8()), + RpcResponse::StreamTermination(_) => None, } } @@ -592,64 +627,67 @@ impl RPCCodedResponse { /// Builds an RPCCodedResponse from a response code and an ErrorMessage pub fn from_error(response_code: u8, err: ErrorType) -> Self { let code = match response_code { - 1 => RPCResponseErrorCode::InvalidRequest, - 2 => RPCResponseErrorCode::ServerError, - 3 => RPCResponseErrorCode::ResourceUnavailable, - 139 => RPCResponseErrorCode::RateLimited, - 140 => RPCResponseErrorCode::BlobsNotFoundForBlock, - _ => RPCResponseErrorCode::Unknown, + 1 => RpcErrorResponse::InvalidRequest, + 2 => RpcErrorResponse::ServerError, + 3 => RpcErrorResponse::ResourceUnavailable, + 139 => RpcErrorResponse::RateLimited, + 140 => 
RpcErrorResponse::BlobsNotFoundForBlock, + _ => RpcErrorResponse::Unknown, }; - RPCCodedResponse::Error(code, err) + RpcResponse::Error(code, err) } /// Returns true if this response always terminates the stream. pub fn close_after(&self) -> bool { - !matches!(self, RPCCodedResponse::Success(_)) + !matches!(self, RpcResponse::Success(_)) } } -impl RPCResponseErrorCode { +impl RpcErrorResponse { fn as_u8(&self) -> u8 { match self { - RPCResponseErrorCode::InvalidRequest => 1, - RPCResponseErrorCode::ServerError => 2, - RPCResponseErrorCode::ResourceUnavailable => 3, - RPCResponseErrorCode::Unknown => 255, - RPCResponseErrorCode::RateLimited => 139, - RPCResponseErrorCode::BlobsNotFoundForBlock => 140, + RpcErrorResponse::InvalidRequest => 1, + RpcErrorResponse::ServerError => 2, + RpcErrorResponse::ResourceUnavailable => 3, + RpcErrorResponse::Unknown => 255, + RpcErrorResponse::RateLimited => 139, + RpcErrorResponse::BlobsNotFoundForBlock => 140, } } } use super::Protocol; -impl RPCResponse { +impl RpcSuccessResponse { pub fn protocol(&self) -> Protocol { match self { - RPCResponse::Status(_) => Protocol::Status, - RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, - RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, - RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, - RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, - RPCResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, - RPCResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, - RPCResponse::Pong(_) => Protocol::Ping, - RPCResponse::MetaData(_) => Protocol::MetaData, - RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, - RPCResponse::LightClientOptimisticUpdate(_) => Protocol::LightClientOptimisticUpdate, - RPCResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::Status(_) => Protocol::Status, + RpcSuccessResponse::BlocksByRange(_) => Protocol::BlocksByRange, + RpcSuccessResponse::BlocksByRoot(_) => 
Protocol::BlocksByRoot, + RpcSuccessResponse::BlobsByRange(_) => Protocol::BlobsByRange, + RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, + RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, + RpcSuccessResponse::Pong(_) => Protocol::Ping, + RpcSuccessResponse::MetaData(_) => Protocol::MetaData, + RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, + RpcSuccessResponse::LightClientOptimisticUpdate(_) => { + Protocol::LightClientOptimisticUpdate + } + RpcSuccessResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, } } } -impl std::fmt::Display for RPCResponseErrorCode { +impl std::fmt::Display for RpcErrorResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { - RPCResponseErrorCode::InvalidRequest => "The request was invalid", - RPCResponseErrorCode::ResourceUnavailable => "Resource unavailable", - RPCResponseErrorCode::ServerError => "Server error occurred", - RPCResponseErrorCode::Unknown => "Unknown error occurred", - RPCResponseErrorCode::RateLimited => "Rate limited", - RPCResponseErrorCode::BlobsNotFoundForBlock => "No blobs for the given root", + RpcErrorResponse::InvalidRequest => "The request was invalid", + RpcErrorResponse::ResourceUnavailable => "Resource unavailable", + RpcErrorResponse::ServerError => "Server error occurred", + RpcErrorResponse::Unknown => "Unknown error occurred", + RpcErrorResponse::RateLimited => "Rate limited", + RpcErrorResponse::BlobsNotFoundForBlock => "No blobs for the given root", }; f.write_str(repr) } @@ -661,61 +699,70 @@ impl std::fmt::Display for StatusMessage { } } -impl std::fmt::Display for RPCResponse { +impl std::fmt::Display for RpcSuccessResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 
match self { - RPCResponse::Status(status) => write!(f, "{}", status), - RPCResponse::BlocksByRange(block) => { + RpcSuccessResponse::Status(status) => write!(f, "{}", status), + RpcSuccessResponse::BlocksByRange(block) => { write!(f, "BlocksByRange: Block slot: {}", block.slot()) } - RPCResponse::BlocksByRoot(block) => { + RpcSuccessResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } - RPCResponse::BlobsByRange(blob) => { + RpcSuccessResponse::BlobsByRange(blob) => { write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) } - RPCResponse::BlobsByRoot(sidecar) => { + RpcSuccessResponse::BlobsByRoot(sidecar) => { write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } - RPCResponse::DataColumnsByRoot(sidecar) => { + RpcSuccessResponse::DataColumnsByRoot(sidecar) => { write!(f, "DataColumnsByRoot: Data column slot: {}", sidecar.slot()) } - RPCResponse::DataColumnsByRange(sidecar) => { + RpcSuccessResponse::DataColumnsByRange(sidecar) => { write!( f, "DataColumnsByRange: Data column slot: {}", sidecar.slot() ) } - RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), - RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), - RPCResponse::LightClientBootstrap(bootstrap) => { + RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), + RpcSuccessResponse::MetaData(metadata) => { + write!(f, "Metadata: {}", metadata.seq_number()) + } + RpcSuccessResponse::LightClientBootstrap(bootstrap) => { write!(f, "LightClientBootstrap Slot: {}", bootstrap.get_slot()) } - RPCResponse::LightClientOptimisticUpdate(update) => { + RpcSuccessResponse::LightClientOptimisticUpdate(update) => { write!( f, "LightClientOptimisticUpdate Slot: {}", update.signature_slot() ) } - RPCResponse::LightClientFinalityUpdate(update) => { + RpcSuccessResponse::LightClientFinalityUpdate(update) => { write!( f, "LightClientFinalityUpdate Slot: {}", update.signature_slot() ) } + 
RpcSuccessResponse::LightClientUpdatesByRange(update) => { + write!( + f, + "LightClientUpdatesByRange Slot: {}", + update.signature_slot(), + ) + } } } } -impl std::fmt::Display for RPCCodedResponse { +impl std::fmt::Display for RpcResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - RPCCodedResponse::Success(res) => write!(f, "{}", res), - RPCCodedResponse::Error(code, err) => write!(f, "{}: {}", code, err), - RPCCodedResponse::StreamTermination(_) => write!(f, "Stream Termination"), + RpcResponse::Success(res) => write!(f, "{}", res), + RpcResponse::Error(code, err) => write!(f, "{}: {}", code, err), + RpcResponse::StreamTermination(_) => write!(f, "Stream Termination"), } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 4961c31d28e..7d091da7660 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -16,6 +16,7 @@ use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o, trace}; use std::marker::PhantomData; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -23,16 +24,15 @@ use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; pub(crate) use methods::{ - MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RPCCodedResponse, RPCResponse, + MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RpcResponse, RpcSuccessResponse, }; -pub(crate) use protocol::InboundRequest; +pub use protocol::RequestType; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, - RPCResponseErrorCode, ResponseTermination, StatusMessage, + ResponseTermination, RpcErrorResponse, StatusMessage, }; -pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, 
RPCError}; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; @@ -48,6 +48,8 @@ mod protocol; mod rate_limiter; mod self_limiter; +static NEXT_REQUEST_ID: AtomicUsize = AtomicUsize::new(1); + /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} impl ReqId for T where T: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -59,13 +61,13 @@ pub enum RPCSend { /// /// The `Id` is given by the application making the request. These /// go over *outbound* connections. - Request(Id, OutboundRequest), + Request(Id, RequestType), /// A response sent from Lighthouse. /// /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the /// peer. The second parameter is a single chunk of a response. These go over *inbound* /// connections. - Response(SubstreamId, RPCCodedResponse), + Response(SubstreamId, RpcResponse), /// Lighthouse has requested to terminate the connection with a goodbye message. Shutdown(Id, GoodbyeReason), } @@ -77,17 +79,46 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(SubstreamId, InboundRequest), + Request(Request), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the /// peer. The second parameter is a single chunk of a response. These go over *outbound* /// connections. - Response(Id, RPCResponse), + Response(Id, RpcSuccessResponse), /// Marks a request as completed EndOfStream(Id, ResponseTermination), } +/// Rpc `Request` identifier. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct RequestId(usize); + +impl RequestId { + /// Returns the next available [`RequestId`]. + pub fn next() -> Self { + Self(NEXT_REQUEST_ID.fetch_add(1, Ordering::SeqCst)) + } + + /// Creates an _unchecked_ [`RequestId`]. 
+ /// + /// [`Rpc`] enforces that [`RequestId`]s are unique and not reused. + /// This constructor does not, hence the _unchecked_. + /// + /// It is primarily meant for allowing manual tests. + pub fn new_unchecked(id: usize) -> Self { + Self(id) + } +} + +/// An Rpc Request. +#[derive(Debug, Clone)] +pub struct Request { + pub id: RequestId, + pub substream_id: SubstreamId, + pub r#type: RequestType, +} + impl std::fmt::Display for RPCSend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -177,7 +208,8 @@ impl RPC { &mut self, peer_id: PeerId, id: (ConnectionId, SubstreamId), - event: RPCCodedResponse, + _request_id: RequestId, + event: RpcResponse, ) { self.events.push(ToSwarm::NotifyHandler { peer_id, @@ -189,7 +221,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. - pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { Ok(event) => event, @@ -229,7 +261,7 @@ impl RPC { data: self.seq_number, }; trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - self.send_request(peer_id, id, OutboundRequest::Ping(ping)); + self.send_request(peer_id, id, RequestType::Ping(ping)); } } @@ -368,13 +400,17 @@ where event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(id, req)) => { + HandlerEvent::Ok(RPCReceived::Request(Request { + id, + substream_id, + r#type, + })) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, &req) { + match limiter.allows(&peer_id, &r#type) { Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = req.versioned_protocol().protocol(); + let 
protocol = r#type.versioned_protocol().protocol(); if matches!( protocol, Protocol::BlocksByRange @@ -384,7 +420,7 @@ where | Protocol::BlobsByRoot | Protocol::DataColumnsByRoot ) { - debug!(self.log, "Request too large to process"; "request" => %req, "protocol" => %protocol); + debug!(self.log, "Request too large to process"; "request" => %r#type, "protocol" => %protocol); } else { // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); @@ -393,9 +429,10 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, + (conn_id, substream_id), + id, + RpcResponse::Error( + RpcErrorResponse::RateLimited, "Rate limited. Request too large".into(), ), ); @@ -403,30 +440,33 @@ where } Err(RateLimitedErr::TooSoon(wait_time)) => { debug!(self.log, "Request exceeds the rate limit"; - "request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); + "request" => %r#type, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); // send an error code to the peer. // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, + (conn_id, substream_id), + id, + RpcResponse::Error( + RpcErrorResponse::RateLimited, format!("Wait {:?}", wait_time).into(), ), ); return; } // No rate limiting, continue. - Ok(_) => {} + Ok(()) => {} } } + // If we received a Ping, we queue a Pong response. 
- if let InboundRequest::Ping(_) = req { + if let RequestType::Ping(_) = r#type { trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %conn_id, "peer_id" => %peer_id); self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Success(RPCResponse::Pong(Ping { + (conn_id, substream_id), + id, + RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data: self.seq_number, })), ); @@ -435,7 +475,11 @@ where self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - message: Ok(RPCReceived::Request(id, req)), + message: Ok(RPCReceived::Request(Request { + id, + substream_id, + r#type, + })), })); } HandlerEvent::Ok(rpc) => { @@ -496,8 +540,8 @@ where match &self.message { Ok(received) => { let (msg_kind, protocol) = match received { - RPCReceived::Request(_, req) => { - ("request", req.versioned_protocol().protocol()) + RPCReceived::Request(Request { r#type, .. }) => { + ("request", r#type.versioned_protocol().protocol()) } RPCReceived::Response(_, res) => ("response", res.protocol()), RPCReceived::EndOfStream(_, end) => ( @@ -509,6 +553,9 @@ where ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::LightClientUpdatesByRange => { + Protocol::LightClientUpdatesByRange + } }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 2bfa42ccac9..b614313a84b 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -1,7 +1,6 @@ -use super::methods::*; use super::protocol::ProtocolId; -use super::protocol::SupportedProtocol; use super::RPCError; +use super::RequestType; use crate::rpc::codec::SSZSnappyOutboundCodec; use crate::rpc::protocol::Encoding; use futures::future::BoxFuture; @@ -21,25 +20,11 @@ use types::{EthSpec, 
ForkContext}; #[derive(Debug, Clone)] pub struct OutboundRequestContainer { - pub req: OutboundRequest, + pub req: RequestType, pub fork_context: Arc, pub max_rpc_size: usize, } -#[derive(Debug, Clone, PartialEq)] -pub enum OutboundRequest { - Status(StatusMessage), - Goodbye(GoodbyeReason), - BlocksByRange(OldBlocksByRangeRequest), - BlocksByRoot(BlocksByRootRequest), - BlobsByRange(BlobsByRangeRequest), - BlobsByRoot(BlobsByRootRequest), - DataColumnsByRoot(DataColumnsByRootRequest), - DataColumnsByRange(DataColumnsByRangeRequest), - Ping(Ping), - MetaData(MetadataRequest), -} - impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; @@ -50,133 +35,6 @@ impl UpgradeInfo for OutboundRequestContainer { } } -/// Implements the encoding per supported protocol for `RPCRequest`. -impl OutboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - OutboundRequest::Status(_) => vec![ProtocolId::new( - SupportedProtocol::StatusV1, - Encoding::SSZSnappy, - )], - OutboundRequest::Goodbye(_) => vec![ProtocolId::new( - SupportedProtocol::GoodbyeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlocksByRange(_) => vec![ - ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), - ], - OutboundRequest::BlocksByRoot(_) => vec![ - ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), - ], - OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( - SupportedProtocol::BlobsByRangeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new( - SupportedProtocol::BlobsByRootV1, - Encoding::SSZSnappy, - )], - OutboundRequest::DataColumnsByRoot(_) => vec![ProtocolId::new( - SupportedProtocol::DataColumnsByRootV1, - Encoding::SSZSnappy, - )], - 
OutboundRequest::DataColumnsByRange(_) => vec![ProtocolId::new( - SupportedProtocol::DataColumnsByRangeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::Ping(_) => vec![ProtocolId::new( - SupportedProtocol::PingV1, - Encoding::SSZSnappy, - )], - OutboundRequest::MetaData(_) => vec![ - ProtocolId::new(SupportedProtocol::MetaDataV3, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ - - /// Maximum number of responses expected for this request. - pub fn max_responses(&self) -> u64 { - match self { - OutboundRequest::Status(_) => 1, - OutboundRequest::Goodbye(_) => 0, - OutboundRequest::BlocksByRange(req) => *req.count(), - OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, - OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), - OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, - OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - OutboundRequest::DataColumnsByRange(req) => req.max_requested::(), - OutboundRequest::Ping(_) => 1, - OutboundRequest::MetaData(_) => 1, - } - } - - pub fn expect_exactly_one_response(&self) -> bool { - match self { - OutboundRequest::Status(_) => true, - OutboundRequest::Goodbye(_) => false, - OutboundRequest::BlocksByRange(_) => false, - OutboundRequest::BlocksByRoot(_) => false, - OutboundRequest::BlobsByRange(_) => false, - OutboundRequest::BlobsByRoot(_) => false, - OutboundRequest::DataColumnsByRoot(_) => false, - OutboundRequest::DataColumnsByRange(_) => false, - OutboundRequest::Ping(_) => true, - OutboundRequest::MetaData(_) => true, - } - } - - /// Gives the corresponding `SupportedProtocol` to this request. 
- pub fn versioned_protocol(&self) -> SupportedProtocol { - match self { - OutboundRequest::Status(_) => SupportedProtocol::StatusV1, - OutboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, - OutboundRequest::BlocksByRange(req) => match req { - OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, - OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, - }, - OutboundRequest::BlocksByRoot(req) => match req { - BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, - BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, - }, - OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, - OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, - OutboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, - OutboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, - OutboundRequest::Ping(_) => SupportedProtocol::PingV1, - OutboundRequest::MetaData(req) => match req { - MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, - MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, - MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, - }, - } - } - - /// Returns the `ResponseTermination` type associated with the request if a stream gets - /// terminated. - pub fn stream_termination(&self) -> ResponseTermination { - match self { - // this only gets called after `multiple_responses()` returns true. Therefore, only - // variants that have `multiple_responses()` can have values. 
- OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, - OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, - OutboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, - OutboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, - OutboundRequest::Status(_) => unreachable!(), - OutboundRequest::Goodbye(_) => unreachable!(), - OutboundRequest::Ping(_) => unreachable!(), - OutboundRequest::MetaData(_) => unreachable!(), - } - } -} - /* RPC Response type - used for outbound upgrades */ /* Outbound upgrades */ @@ -211,22 +69,3 @@ where .boxed() } } - -impl std::fmt::Display for OutboundRequest { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - OutboundRequest::Status(status) => write!(f, "Status Message: {}", status), - OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), - OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), - OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), - OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), - OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), - OutboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), - OutboundRequest::DataColumnsByRange(req) => { - write!(f, "Data columns by range: {:?}", req) - } - OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), - OutboundRequest::MetaData(_) => write!(f, "MetaData request"), - } - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 09a18e5de6b..d0dbffe9326 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 
+21,8 @@ use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, - LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, Signature, + SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -143,6 +144,13 @@ pub static LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX: LazyLock = LazyLock::new(| LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Electra) }); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Capella)); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Deneb)); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Electra)); + /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// The number of seconds to wait for the first bytes of a request once a protocol has been @@ -151,12 +159,10 @@ const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. 
pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { - match fork_context.current_fork() { - ForkName::Altair | ForkName::Base => max_chunk_size / 10, - ForkName::Bellatrix => max_chunk_size, - ForkName::Capella => max_chunk_size, - ForkName::Deneb => max_chunk_size, - ForkName::Electra => max_chunk_size, + if fork_context.current_fork().bellatrix_enabled() { + max_chunk_size + } else { + max_chunk_size / 10 } } @@ -192,6 +198,26 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { } } +fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> RpcLimits { + let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); + + match ¤t_fork { + ForkName::Base => RpcLimits::new(0, 0), + ForkName::Altair | ForkName::Bellatrix => { + RpcLimits::new(altair_fixed_len, altair_fixed_len) + } + ForkName::Capella => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX) + } + ForkName::Deneb => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX) + } + ForkName::Electra => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX) + } + } +} + fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> RpcLimits { let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); @@ -288,6 +314,9 @@ pub enum Protocol { /// The `LightClientFinalityUpdate` protocol name. 
#[strum(serialize = "light_client_finality_update")] LightClientFinalityUpdate, + /// The `LightClientUpdatesByRange` protocol name + #[strum(serialize = "light_client_updates_by_range")] + LightClientUpdatesByRange, } impl Protocol { @@ -306,6 +335,7 @@ impl Protocol { Protocol::LightClientBootstrap => None, Protocol::LightClientOptimisticUpdate => None, Protocol::LightClientFinalityUpdate => None, + Protocol::LightClientUpdatesByRange => None, } } } @@ -336,6 +366,7 @@ pub enum SupportedProtocol { LightClientBootstrapV1, LightClientOptimisticUpdateV1, LightClientFinalityUpdateV1, + LightClientUpdatesByRangeV1, } impl SupportedProtocol { @@ -358,6 +389,7 @@ impl SupportedProtocol { SupportedProtocol::LightClientBootstrapV1 => "1", SupportedProtocol::LightClientOptimisticUpdateV1 => "1", SupportedProtocol::LightClientFinalityUpdateV1 => "1", + SupportedProtocol::LightClientUpdatesByRangeV1 => "1", } } @@ -382,6 +414,7 @@ impl SupportedProtocol { Protocol::LightClientOptimisticUpdate } SupportedProtocol::LightClientFinalityUpdateV1 => Protocol::LightClientFinalityUpdate, + SupportedProtocol::LightClientUpdatesByRangeV1 => Protocol::LightClientUpdatesByRange, } } @@ -544,6 +577,10 @@ impl ProtocolId { ), Protocol::LightClientOptimisticUpdate => RpcLimits::new(0, 0), Protocol::LightClientFinalityUpdate => RpcLimits::new(0, 0), + Protocol::LightClientUpdatesByRange => RpcLimits::new( + LightClientUpdatesByRangeRequest::ssz_min_len(), + LightClientUpdatesByRangeRequest::ssz_max_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -579,6 +616,9 @@ impl ProtocolId { Protocol::LightClientFinalityUpdate => { rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork()) } + Protocol::LightClientUpdatesByRange => { + rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork()) + } } } @@ -594,7 +634,8 @@ impl ProtocolId { | SupportedProtocol::DataColumnsByRangeV1 | 
SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 - | SupportedProtocol::LightClientFinalityUpdateV1 => true, + | SupportedProtocol::LightClientFinalityUpdateV1 + | SupportedProtocol::LightClientUpdatesByRangeV1 => true, SupportedProtocol::StatusV1 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 @@ -645,7 +686,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. -pub type InboundOutput = (InboundRequest, InboundFramed); +pub type InboundOutput = (RequestType, InboundFramed); pub type InboundFramed = Framed>>>, SSZSnappyInboundCodec>; @@ -679,19 +720,19 @@ where // MetaData requests should be empty, return the stream match versioned_protocol { SupportedProtocol::MetaDataV1 => { - Ok((InboundRequest::MetaData(MetadataRequest::new_v1()), socket)) + Ok((RequestType::MetaData(MetadataRequest::new_v1()), socket)) } SupportedProtocol::MetaDataV2 => { - Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket)) + Ok((RequestType::MetaData(MetadataRequest::new_v2()), socket)) } SupportedProtocol::MetaDataV3 => { - Ok((InboundRequest::MetaData(MetadataRequest::new_v3()), socket)) + Ok((RequestType::MetaData(MetadataRequest::new_v3()), socket)) } SupportedProtocol::LightClientOptimisticUpdateV1 => { - Ok((InboundRequest::LightClientOptimisticUpdate, socket)) + Ok((RequestType::LightClientOptimisticUpdate, socket)) } SupportedProtocol::LightClientFinalityUpdateV1 => { - Ok((InboundRequest::LightClientFinalityUpdate, socket)) + Ok((RequestType::LightClientFinalityUpdate, socket)) } _ => { match tokio::time::timeout( @@ -713,7 +754,7 @@ where } #[derive(Debug, Clone, PartialEq)] -pub enum InboundRequest { +pub enum RequestType { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), @@ -725,63 +766,68 @@ pub enum InboundRequest { 
LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, + LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), Ping(Ping), MetaData(MetadataRequest), } /// Implements the encoding per supported protocol for `RPCRequest`. -impl InboundRequest { +impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. pub fn max_responses(&self) -> u64 { match self { - InboundRequest::Status(_) => 1, - InboundRequest::Goodbye(_) => 0, - InboundRequest::BlocksByRange(req) => *req.count(), - InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, - InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), - InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, - InboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - InboundRequest::DataColumnsByRange(req) => req.max_requested::(), - InboundRequest::Ping(_) => 1, - InboundRequest::MetaData(_) => 1, - InboundRequest::LightClientBootstrap(_) => 1, - InboundRequest::LightClientOptimisticUpdate => 1, - InboundRequest::LightClientFinalityUpdate => 1, + RequestType::Status(_) => 1, + RequestType::Goodbye(_) => 0, + RequestType::BlocksByRange(req) => *req.count(), + RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, + RequestType::BlobsByRange(req) => req.max_blobs_requested::(), + RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, + RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + RequestType::DataColumnsByRange(req) => req.max_requested::(), + RequestType::Ping(_) => 1, + RequestType::MetaData(_) => 1, + RequestType::LightClientBootstrap(_) => 1, + RequestType::LightClientOptimisticUpdate => 1, + RequestType::LightClientFinalityUpdate => 1, + RequestType::LightClientUpdatesByRange(req) => req.count, } } /// Gives the corresponding `SupportedProtocol` to this request. 
pub fn versioned_protocol(&self) -> SupportedProtocol { match self { - InboundRequest::Status(_) => SupportedProtocol::StatusV1, - InboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, - InboundRequest::BlocksByRange(req) => match req { + RequestType::Status(_) => SupportedProtocol::StatusV1, + RequestType::Goodbye(_) => SupportedProtocol::GoodbyeV1, + RequestType::BlocksByRange(req) => match req { OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, }, - InboundRequest::BlocksByRoot(req) => match req { + RequestType::BlocksByRoot(req) => match req { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, - InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, - InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, - InboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, - InboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, - InboundRequest::Ping(_) => SupportedProtocol::PingV1, - InboundRequest::MetaData(req) => match req { + RequestType::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, + RequestType::Ping(_) => SupportedProtocol::PingV1, + RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, }, - InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, - InboundRequest::LightClientOptimisticUpdate => { + RequestType::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, + 
RequestType::LightClientOptimisticUpdate => { SupportedProtocol::LightClientOptimisticUpdateV1 } - InboundRequest::LightClientFinalityUpdate => { + RequestType::LightClientFinalityUpdate => { SupportedProtocol::LightClientFinalityUpdateV1 } + RequestType::LightClientUpdatesByRange(_) => { + SupportedProtocol::LightClientUpdatesByRangeV1 + } } } @@ -791,19 +837,102 @@ impl InboundRequest { match self { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. - InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, - InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, - InboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, - InboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, - InboundRequest::Status(_) => unreachable!(), - InboundRequest::Goodbye(_) => unreachable!(), - InboundRequest::Ping(_) => unreachable!(), - InboundRequest::MetaData(_) => unreachable!(), - InboundRequest::LightClientBootstrap(_) => unreachable!(), - InboundRequest::LightClientFinalityUpdate => unreachable!(), - InboundRequest::LightClientOptimisticUpdate => unreachable!(), + RequestType::BlocksByRange(_) => ResponseTermination::BlocksByRange, + RequestType::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + RequestType::BlobsByRange(_) => ResponseTermination::BlobsByRange, + RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::Status(_) => unreachable!(), + RequestType::Goodbye(_) => unreachable!(), + RequestType::Ping(_) => unreachable!(), + RequestType::MetaData(_) => unreachable!(), + 
RequestType::LightClientBootstrap(_) => unreachable!(), + RequestType::LightClientFinalityUpdate => unreachable!(), + RequestType::LightClientOptimisticUpdate => unreachable!(), + RequestType::LightClientUpdatesByRange(_) => unreachable!(), + } + } + + pub fn supported_protocols(&self) -> Vec { + match self { + // add more protocols when versions/encodings are supported + RequestType::Status(_) => vec![ProtocolId::new( + SupportedProtocol::StatusV1, + Encoding::SSZSnappy, + )], + RequestType::Goodbye(_) => vec![ProtocolId::new( + SupportedProtocol::GoodbyeV1, + Encoding::SSZSnappy, + )], + RequestType::BlocksByRange(_) => vec![ + ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), + ], + RequestType::BlocksByRoot(_) => vec![ + ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), + ], + RequestType::BlobsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::BlobsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::DataColumnsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::DataColumnsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::Ping(_) => vec![ProtocolId::new( + SupportedProtocol::PingV1, + Encoding::SSZSnappy, + )], + RequestType::MetaData(_) => vec![ + ProtocolId::new(SupportedProtocol::MetaDataV3, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), + ], + RequestType::LightClientBootstrap(_) => vec![ProtocolId::new( + SupportedProtocol::LightClientBootstrapV1, + Encoding::SSZSnappy, + )], + 
RequestType::LightClientOptimisticUpdate => vec![ProtocolId::new( + SupportedProtocol::LightClientOptimisticUpdateV1, + Encoding::SSZSnappy, + )], + RequestType::LightClientFinalityUpdate => vec![ProtocolId::new( + SupportedProtocol::LightClientFinalityUpdateV1, + Encoding::SSZSnappy, + )], + RequestType::LightClientUpdatesByRange(_) => vec![ProtocolId::new( + SupportedProtocol::LightClientUpdatesByRangeV1, + Encoding::SSZSnappy, + )], + } + } + + pub fn expect_exactly_one_response(&self) -> bool { + match self { + RequestType::Status(_) => true, + RequestType::Goodbye(_) => false, + RequestType::BlocksByRange(_) => false, + RequestType::BlocksByRoot(_) => false, + RequestType::BlobsByRange(_) => false, + RequestType::BlobsByRoot(_) => false, + RequestType::DataColumnsByRoot(_) => false, + RequestType::DataColumnsByRange(_) => false, + RequestType::Ping(_) => true, + RequestType::MetaData(_) => true, + RequestType::LightClientBootstrap(_) => true, + RequestType::LightClientOptimisticUpdate => true, + RequestType::LightClientFinalityUpdate => true, + RequestType::LightClientUpdatesByRange(_) => true, } } } @@ -819,7 +948,7 @@ pub enum RPCError { /// IO Error. IoError(String), /// The peer returned a valid response but the response indicated an error. - ErrorResponse(RPCResponseErrorCode, String), + ErrorResponse(RpcErrorResponse, String), /// Timed out waiting for a response. StreamTimeout, /// Peer does not support the protocol. 
@@ -898,30 +1027,33 @@ impl std::error::Error for RPCError { } } -impl std::fmt::Display for InboundRequest { +impl std::fmt::Display for RequestType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - InboundRequest::Status(status) => write!(f, "Status Message: {}", status), - InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), - InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), - InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), - InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), - InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), - InboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), - InboundRequest::DataColumnsByRange(req) => { + RequestType::Status(status) => write!(f, "Status Message: {}", status), + RequestType::Goodbye(reason) => write!(f, "Goodbye: {}", reason), + RequestType::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), + RequestType::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + RequestType::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), + RequestType::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + RequestType::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + RequestType::DataColumnsByRange(req) => { write!(f, "Data columns by range: {:?}", req) } - InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), - InboundRequest::MetaData(_) => write!(f, "MetaData request"), - InboundRequest::LightClientBootstrap(bootstrap) => { + RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), + RequestType::MetaData(_) => write!(f, "MetaData request"), + RequestType::LightClientBootstrap(bootstrap) => { write!(f, "Light client boostrap: {}", bootstrap.root) } - InboundRequest::LightClientOptimisticUpdate => { + RequestType::LightClientOptimisticUpdate => { write!(f, "Light client 
optimistic update request") } - InboundRequest::LightClientFinalityUpdate => { + RequestType::LightClientFinalityUpdate => { write!(f, "Light client finality update request") } + RequestType::LightClientUpdatesByRange(_) => { + write!(f, "Light client updates by range request") + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 523b891a009..ecbacc8c112 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -107,6 +107,8 @@ pub struct RPCRateLimiter { lc_optimistic_update_rl: Limiter, /// LightClientFinalityUpdate rate limiter. lc_finality_update_rl: Limiter, + /// LightClientUpdatesByRange rate limiter. + lc_updates_by_range_rl: Limiter, } /// Error type for non conformant requests @@ -147,6 +149,8 @@ pub struct RPCRateLimiterBuilder { lc_optimistic_update_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. lc_finality_update_quota: Option, + /// Quota for the LightClientUpdatesByRange protocol. 
+ lc_updates_by_range_quota: Option, } impl RPCRateLimiterBuilder { @@ -167,6 +171,7 @@ impl RPCRateLimiterBuilder { Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, + Protocol::LightClientUpdatesByRange => self.lc_updates_by_range_quota = q, } self } @@ -192,6 +197,9 @@ impl RPCRateLimiterBuilder { let lc_finality_update_quota = self .lc_finality_update_quota .ok_or("LightClientFinalityUpdate quota not specified")?; + let lc_updates_by_range_quota = self + .lc_updates_by_range_quota + .ok_or("LightClientUpdatesByRange quota not specified")?; let blbrange_quota = self .blbrange_quota @@ -222,6 +230,7 @@ impl RPCRateLimiterBuilder { let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; + let lc_updates_by_range_rl = Limiter::from_quota(lc_updates_by_range_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -242,6 +251,7 @@ impl RPCRateLimiterBuilder { lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, + lc_updates_by_range_rl, init_time: Instant::now(), }) } @@ -252,7 +262,7 @@ pub trait RateLimiterItem { fn max_responses(&self) -> u64; } -impl RateLimiterItem for super::InboundRequest { +impl RateLimiterItem for super::RequestType { fn protocol(&self) -> Protocol { self.versioned_protocol().protocol() } @@ -262,15 +272,6 @@ impl RateLimiterItem for super::InboundRequest { } } -impl RateLimiterItem for super::OutboundRequest { - fn protocol(&self) -> Protocol { - self.versioned_protocol().protocol() - } - - fn max_responses(&self) -> u64 { - self.max_responses() - } -} impl RPCRateLimiter { pub fn new_with_config(config: RateLimiterConfig) -> 
Result { // Destructure to make sure every configuration value is used. @@ -288,6 +289,7 @@ impl RPCRateLimiter { light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, + light_client_updates_by_range_quota, } = config; Self::builder() @@ -310,6 +312,10 @@ impl RPCRateLimiter { Protocol::LightClientFinalityUpdate, light_client_finality_update_quota, ) + .set_quota( + Protocol::LightClientUpdatesByRange, + light_client_updates_by_range_quota, + ) .build() } @@ -342,6 +348,7 @@ impl RPCRateLimiter { Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, + Protocol::LightClientUpdatesByRange => &mut self.lc_updates_by_range_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 77caecb16df..e968ad11e3d 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -14,13 +14,13 @@ use types::EthSpec; use super::{ config::OutboundRateLimiterConfig, rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, - BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, + BehaviourAction, Protocol, RPCSend, ReqId, RequestType, }; /// A request that was rate limited or waiting on rate limited requests for the same peer and /// protocol. struct QueuedRequest { - req: OutboundRequest, + req: RequestType, request_id: Id, } @@ -70,7 +70,7 @@ impl SelfRateLimiter { &mut self, peer_id: PeerId, request_id: Id, - req: OutboundRequest, + req: RequestType, ) -> Result, Error> { let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. 
@@ -101,7 +101,7 @@ impl SelfRateLimiter { limiter: &mut RateLimiter, peer_id: PeerId, request_id: Id, - req: OutboundRequest, + req: RequestType, log: &Logger, ) -> Result, (QueuedRequest, Duration)> { match limiter.allows(&peer_id, &req) { @@ -211,7 +211,7 @@ mod tests { use crate::rpc::config::{OutboundRateLimiterConfig, RateLimiterConfig}; use crate::rpc::rate_limiter::Quota; use crate::rpc::self_limiter::SelfRateLimiter; - use crate::rpc::{OutboundRequest, Ping, Protocol}; + use crate::rpc::{Ping, Protocol, RequestType}; use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; @@ -235,7 +235,7 @@ mod tests { RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id: i, })), - OutboundRequest::Ping(Ping { data: i as u64 }), + RequestType::Ping(Ping { data: i as u64 }), ); } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 30400db3b66..cb228153908 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,19 +3,12 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; -use crate::rpc::methods::{ - BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, -}; use crate::rpc::{ - methods::{ - BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, - OldBlocksByRangeRequest, OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, - RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage, - }, - OutboundRequest, SubstreamId, + methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}, + 
SubstreamId, }; /// Identifier of requests sent by a peer. @@ -29,11 +22,6 @@ pub struct SingleLookupReqId { pub req_id: Id, } -/// Request ID for data_columns_by_root requests. Block lookup do not issue this requests directly. -/// Wrapping this particular req_id, ensures not mixing this requests with a custody req_id. -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub struct DataColumnsByRootRequestId(pub Id); - /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum SyncRequestId { @@ -42,11 +30,19 @@ pub enum SyncRequestId { /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. - DataColumnsByRoot(DataColumnsByRootRequestId, DataColumnsByRootRequester), + DataColumnsByRoot(DataColumnsByRootRequestId), /// Range request that is composed by both a block range request and a blob range request. RangeBlockAndBlobs { id: Id }, } +/// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. +/// Wrapping this particular req_id, ensures not mixing this request with a custody req_id. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId { + pub id: Id, + pub requester: DataColumnsByRootRequester, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { Sampling(SamplingId), @@ -93,69 +89,6 @@ pub enum RequestId { Internal, } -/// The type of RPC requests the Behaviour informs it has received and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level requests that can be -// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't -// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. -#[derive(Debug, Clone, PartialEq)] -pub enum Request { - /// A Status message. 
- Status(StatusMessage), - /// A blocks by range request. - BlocksByRange(BlocksByRangeRequest), - /// A blobs by range request. - BlobsByRange(BlobsByRangeRequest), - /// A request blocks root request. - BlocksByRoot(BlocksByRootRequest), - // light client bootstrap request - LightClientBootstrap(LightClientBootstrapRequest), - // light client optimistic update request - LightClientOptimisticUpdate, - // light client finality update request - LightClientFinalityUpdate, - /// A request blobs root request. - BlobsByRoot(BlobsByRootRequest), - /// A request data columns root request. - DataColumnsByRoot(DataColumnsByRootRequest), - /// A request data columns by range request. - DataColumnsByRange(DataColumnsByRangeRequest), -} - -impl std::convert::From for OutboundRequest { - fn from(req: Request) -> OutboundRequest { - match req { - Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(r) => match r { - BlocksByRangeRequest::V1(req) => OutboundRequest::BlocksByRange( - OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { - start_slot: req.start_slot, - count: req.count, - step: 1, - }), - ), - BlocksByRangeRequest::V2(req) => OutboundRequest::BlocksByRange( - OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { - start_slot: req.start_slot, - count: req.count, - step: 1, - }), - ), - }, - Request::LightClientBootstrap(_) - | Request::LightClientOptimisticUpdate - | Request::LightClientFinalityUpdate => { - unreachable!("Lighthouse never makes an outbound light client request") - } - Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), - Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), - Request::DataColumnsByRoot(r) => OutboundRequest::DataColumnsByRoot(r), - Request::DataColumnsByRange(r) => OutboundRequest::DataColumnsByRange(r), - Request::Status(s) => OutboundRequest::Status(s), - } - } -} - /// The type of RPC responses the Behaviour informs it has received, and allows for sending. 
/// // NOTE: This is an application-level wrapper over the lower network level responses that can be @@ -184,47 +117,53 @@ pub enum Response { LightClientOptimisticUpdate(Arc>), /// A response to a LightClientFinalityUpdate request. LightClientFinalityUpdate(Arc>), + /// A response to a LightClientUpdatesByRange request. + LightClientUpdatesByRange(Option>>), } -impl std::convert::From> for RPCCodedResponse { - fn from(resp: Response) -> RPCCodedResponse { +impl std::convert::From> for RpcResponse { + fn from(resp: Response) -> RpcResponse { match resp { Response::BlocksByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRoot), }, Response::BlocksByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRange(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::BlobsByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRoot), }, Response::BlobsByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRange(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRange), }, Response::DataColumnsByRoot(r) => match r { - Some(d) => 
RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot(d)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), + Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot(d)), + None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), }, Response::DataColumnsByRange(r) => match r { - Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRange(d)), - None => { - RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRange) - } + Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(d)), + None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange), }, - Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { - RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) + RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) } Response::LightClientOptimisticUpdate(o) => { - RPCCodedResponse::Success(RPCResponse::LightClientOptimisticUpdate(o)) + RpcResponse::Success(RpcSuccessResponse::LightClientOptimisticUpdate(o)) } Response::LightClientFinalityUpdate(f) => { - RPCCodedResponse::Success(RPCResponse::LightClientFinalityUpdate(f)) + RpcResponse::Success(RpcSuccessResponse::LightClientFinalityUpdate(f)) } + Response::LightClientUpdatesByRange(f) => match f { + Some(d) => RpcResponse::Success(RpcSuccessResponse::LightClientUpdatesByRange(d)), + None => { + RpcResponse::StreamTermination(ResponseTermination::LightClientUpdatesByRange) + } + }, } } } @@ -245,8 +184,9 @@ impl slog::Value for RequestId { } } +// This custom impl reduces log boilerplate not printing `DataColumnsByRootRequestId` on each id log impl std::fmt::Display for DataColumnsByRootRequestId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{} {:?}", self.id, 
self.requester) } } diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs deleted file mode 100644 index ab2e43630bb..00000000000 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::discovery::Discovery; -use crate::peer_manager::PeerManager; -use crate::rpc::RPC; -use crate::types::SnappyTransform; - -use libp2p::identify; -use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::NetworkBehaviour; -use libp2p::upnp::tokio::Behaviour as Upnp; -use types::EthSpec; - -use super::api_types::RequestId; - -pub type SubscriptionFilter = - gossipsub::MaxCountSubscriptionFilter; -pub type Gossipsub = gossipsub::Behaviour; - -#[derive(NetworkBehaviour)] -pub(crate) struct Behaviour -where - E: EthSpec, -{ - /// Keep track of active and pending connections to enforce hard limits. - pub connection_limits: libp2p::connection_limits::Behaviour, - /// The peer manager that keeps track of peer's reputation and status. - pub peer_manager: PeerManager, - /// The Eth2 RPC specified in the wire-0 protocol. - pub eth2_rpc: RPC, - /// Discv5 Discovery protocol. - pub discovery: Discovery, - /// Keep regular connection to peers and disconnect if absent. - // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. - /// Provides IP addresses and peer information. - pub identify: identify::Behaviour, - /// Libp2p UPnP port mapping. - pub upnp: Toggle, - /// The routing pub-sub mechanism for eth2. 
- pub gossipsub: Gossipsub, -} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ede8fdd13a7..b23e417adb0 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,4 +1,3 @@ -use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ @@ -11,12 +10,9 @@ use crate::peer_manager::{ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; use crate::rpc::{ - methods, BlocksByRangeRequest, GoodbyeReason, HandlerErr, InboundRequest, NetworkParams, - OutboundRequest, Protocol, RPCCodedResponse, RPCError, RPCMessage, RPCReceived, RPCResponse, - RPCResponseErrorCode, ResponseTermination, RPC, + self, GoodbyeReason, HandlerErr, NetworkParams, Protocol, RPCError, RPCMessage, RPCReceived, + RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, RpcSuccessResponse, RPC, }; -use crate::service::behaviour::BehaviourEvent; -pub use crate::service::behaviour::Gossipsub; use crate::types::{ attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, @@ -25,7 +21,7 @@ use crate::types::{ use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use api_types::{AppRequestId, PeerRequestId, Request, RequestId, Response}; +use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, @@ -34,24 +30,21 @@ use gossipsub::{ use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::multiaddr::{self, Multiaddr, 
Protocol as MProtocol}; use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::upnp::tokio::Behaviour as Upnp; use libp2p::{identify, PeerId, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::num::{NonZeroU8, NonZeroUsize}; use std::path::PathBuf; use std::pin::Pin; -use std::{ - sync::Arc, - task::{Context, Poll}, -}; +use std::sync::Arc; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use types::{ChainSpec, ForkName}; -use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; +use utils::{build_transport, strip_peer_id, Context as ServiceContext}; pub mod api_types; -mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; pub mod utils; @@ -84,7 +77,7 @@ pub enum NetworkEvent { /// Identifier of the request. All responses to this request must use this id. id: PeerRequestId, /// Request the peer sent. - request: Request, + request: rpc::Request, }, ResponseReceived { /// Peer that sent the response. @@ -110,6 +103,41 @@ pub enum NetworkEvent { ZeroListeners, } +pub type Gossipsub = gossipsub::Behaviour; +pub type SubscriptionFilter = + gossipsub::MaxCountSubscriptionFilter; + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour +where + E: EthSpec, +{ + // NOTE: The order of the following list of behaviours has meaning, + // `NetworkBehaviour::handle_{pending, established}_{inbound, outbound}` methods + // are called sequentially for each behaviour and they are fallible, + // therefore we want `connection_limits` and `peer_manager` running first, + // which are the behaviours that may reject a connection, so that + // when the subsequent behaviours are called they are certain the connection won't be rejected. + + // + /// Keep track of active and pending connections to enforce hard limits. 
+ pub connection_limits: libp2p::connection_limits::Behaviour, + /// The peer manager that keeps track of peer's reputation and status. + pub peer_manager: PeerManager, + /// The Eth2 RPC specified in the wire-0 protocol. + pub eth2_rpc: RPC, + /// Discv5 Discovery protocol. + pub discovery: Discovery, + /// Keep regular connection to peers and disconnect if absent. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + pub identify: identify::Behaviour, + /// Libp2p UPnP port mapping. + pub upnp: Toggle, + /// The routing pub-sub mechanism for eth2. + pub gossipsub: Gossipsub, +} + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. @@ -206,6 +234,7 @@ impl Network { gossipsub_config_params, ctx.chain_spec.seconds_per_slot, E::slots_per_epoch(), + config.idontwant_message_size_threshold, ); let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); @@ -397,7 +426,7 @@ impl Network { (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) .ceil() as u32, )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + .with_max_established_per_peer(Some(1)); libp2p::connection_limits::Behaviour::new(limits) }; @@ -934,25 +963,28 @@ impl Network { &mut self, peer_id: PeerId, request_id: AppRequestId, - request: Request, + request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { return Err((request_id, RPCError::Disconnected)); } - self.eth2_rpc_mut().send_request( - peer_id, - RequestId::Application(request_id), - request.into(), - ); + self.eth2_rpc_mut() + .send_request(peer_id, RequestId::Application(request_id), request); Ok(()) } /// Send a successful response to a peer over RPC. 
- pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { + pub fn send_response( + &mut self, + peer_id: PeerId, + id: PeerRequestId, + request_id: rpc::RequestId, + response: Response, + ) { self.eth2_rpc_mut() - .send_response(peer_id, id, response.into()) + .send_response(peer_id, id, request_id, response.into()) } /// Inform the peer that their request produced an error. @@ -960,13 +992,15 @@ impl Network { &mut self, peer_id: PeerId, id: PeerRequestId, - error: RPCResponseErrorCode, + request_id: rpc::RequestId, + error: RpcErrorResponse, reason: String, ) { self.eth2_rpc_mut().send_response( peer_id, id, - RPCCodedResponse::Error(error, reason.into()), + request_id, + RpcResponse::Error(error, reason.into()), ) } @@ -1130,10 +1164,10 @@ impl Network { let event = if self.fork_context.spec.is_peer_das_scheduled() { // Nodes with higher custody will probably start advertising it // before peerdas is activated - OutboundRequest::MetaData(MetadataRequest::new_v3()) + RequestType::MetaData(MetadataRequest::new_v3()) } else { // We always prefer sending V2 requests otherwise - OutboundRequest::MetaData(MetadataRequest::new_v2()) + RequestType::MetaData(MetadataRequest::new_v2()) }; self.eth2_rpc_mut() .send_request(peer_id, RequestId::Internal, event); @@ -1144,12 +1178,14 @@ impl Network { &mut self, _req: MetadataRequest, id: PeerRequestId, + request_id: rpc::RequestId, peer_id: PeerId, ) { let metadata = self.network_globals.local_metadata.read().clone(); // The encoder is responsible for sending the negotiated version of the metadata - let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata)); - self.eth2_rpc_mut().send_response(peer_id, id, event); + let event = RpcResponse::Success(RpcSuccessResponse::MetaData(metadata)); + self.eth2_rpc_mut() + .send_response(peer_id, id, request_id, event); } // RPC Propagation methods @@ -1171,56 +1207,6 @@ impl Network { } } - /// Convenience function to propagate a request. 
- #[must_use = "actually return the event"] - fn build_request( - &mut self, - id: PeerRequestId, - peer_id: PeerId, - request: Request, - ) -> NetworkEvent { - // Increment metrics - match &request { - Request::Status(_) => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) - } - Request::LightClientBootstrap(_) => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"]) - } - Request::LightClientOptimisticUpdate => metrics::inc_counter_vec( - &metrics::TOTAL_RPC_REQUESTS, - &["light_client_optimistic_update"], - ), - Request::LightClientFinalityUpdate => metrics::inc_counter_vec( - &metrics::TOTAL_RPC_REQUESTS, - &["light_client_finality_update"], - ), - Request::BlocksByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) - } - Request::BlocksByRoot { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) - } - Request::BlobsByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]) - } - Request::BlobsByRoot { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) - } - Request::DataColumnsByRoot { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_root"]) - } - Request::DataColumnsByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_range"]) - } - } - NetworkEvent::RequestReceived { - peer_id, - id, - request, - } - } - /// Dial cached Enrs in discovery service that are in the given `subnet_id` and aren't /// in Connected, Dialing or Banned state. 
fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet, spec: Arc) { @@ -1242,7 +1228,7 @@ impl Network { self.discovery_mut().remove_cached_enr(&enr.peer_id()); let peer_id = enr.peer_id(); if self.peer_manager_mut().dial_peer(enr) { - debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); + debug!(self.log, "Added cached ENR peer to dial queue"; "peer_id" => %peer_id); } } } @@ -1406,7 +1392,7 @@ impl Network { return None; } - let handler_id = event.conn_id; + let connection_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.message { Err(handler_err) => { @@ -1444,21 +1430,25 @@ impl Network { } } } - Ok(RPCReceived::Request(id, request)) => { - let peer_request_id = (handler_id, id); - match request { + Ok(RPCReceived::Request(request)) => { + match request.r#type { /* Behaviour managed protocols: Ping and Metadata */ - InboundRequest::Ping(ping) => { + RequestType::Ping(ping) => { // inform the peer manager and send the response self.peer_manager_mut().ping_request(&peer_id, ping.data); None } - InboundRequest::MetaData(req) => { + RequestType::MetaData(req) => { // send the requested meta-data - self.send_meta_data_response(req, (handler_id, id), peer_id); + self.send_meta_data_response( + req, + (connection_id, request.substream_id), + request.id, + peer_id, + ); None } - InboundRequest::Goodbye(reason) => { + RequestType::Goodbye(reason) => { // queue for disconnection without a goodbye message debug!( self.log, "Peer sent Goodbye"; @@ -1473,17 +1463,19 @@ impl Network { None } /* Protocols propagated to the Network */ - InboundRequest::Status(msg) => { + RequestType::Status(_) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]); // propagate the STATUS message upwards - let event = - self.build_request(peer_request_id, peer_id, 
Request::Status(msg)); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::BlocksByRange(req) => { + RequestType::BlocksByRange(ref req) => { // Still disconnect the peer if the request is naughty. - let mut count = *req.count(); if *req.step() == 0 { self.peer_manager_mut().handle_rpc_error( &peer_id, @@ -1495,135 +1487,164 @@ impl Network { ); return None; } - // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 - if *req.step() > 1 { - count = 1; - } - let request = match req { - methods::OldBlocksByRangeRequest::V1(req) => Request::BlocksByRange( - BlocksByRangeRequest::new_v1(req.start_slot, count), - ), - methods::OldBlocksByRangeRequest::V2(req) => Request::BlocksByRange( - BlocksByRangeRequest::new(req.start_slot, count), - ), - }; - let event = self.build_request(peer_request_id, peer_id, request); - Some(event) - } - InboundRequest::BlocksByRoot(req) => { - let event = self.build_request( - peer_request_id, - peer_id, - Request::BlocksByRoot(req), + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["blocks_by_range"], ); - Some(event) - } - InboundRequest::BlobsByRange(req) => { - let event = self.build_request( - peer_request_id, + Some(NetworkEvent::RequestReceived { peer_id, - Request::BlobsByRange(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::BlobsByRoot(req) => { - let event = - self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); - Some(event) + RequestType::BlocksByRoot(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]); + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::DataColumnsByRoot(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlobsByRange(_) => { + 
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]); + Some(NetworkEvent::RequestReceived { peer_id, - Request::DataColumnsByRoot(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::DataColumnsByRange(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlobsByRoot(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]); + Some(NetworkEvent::RequestReceived { peer_id, - Request::DataColumnsByRange(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientBootstrap(req) => { - let event = self.build_request( - peer_request_id, + RequestType::DataColumnsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["data_columns_by_root"], + ); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientBootstrap(req), + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::DataColumnsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["data_columns_by_range"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientOptimisticUpdate => { - let event = self.build_request( - peer_request_id, + RequestType::LightClientBootstrap(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_bootstrap"], + ); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientOptimisticUpdate, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientOptimisticUpdate => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_optimistic_update"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientFinalityUpdate => { - let 
event = self.build_request( - peer_request_id, + RequestType::LightClientFinalityUpdate => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_finality_update"], + ); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientFinalityUpdate, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientUpdatesByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_updates_by_range"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } } } Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ - RPCResponse::Pong(ping) => { + RpcSuccessResponse::Pong(ping) => { self.peer_manager_mut().pong_response(&peer_id, ping.data); None } - RPCResponse::MetaData(meta_data) => { + RpcSuccessResponse::MetaData(meta_data) => { self.peer_manager_mut() .meta_data_response(&peer_id, meta_data); None } /* Network propagated protocols */ - RPCResponse::Status(msg) => { + RpcSuccessResponse::Status(msg) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards self.build_response(id, peer_id, Response::Status(msg)) } - RPCResponse::BlocksByRange(resp) => { + RpcSuccessResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } - RPCResponse::BlobsByRange(resp) => { + RpcSuccessResponse::BlobsByRange(resp) => { self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) } - RPCResponse::BlocksByRoot(resp) => { + RpcSuccessResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } - RPCResponse::BlobsByRoot(resp) => { + RpcSuccessResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } - RPCResponse::DataColumnsByRoot(resp) => 
{ + RpcSuccessResponse::DataColumnsByRoot(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRoot(Some(resp))) } - RPCResponse::DataColumnsByRange(resp) => { + RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } // Should never be reached - RPCResponse::LightClientBootstrap(bootstrap) => { + RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) } - RPCResponse::LightClientOptimisticUpdate(update) => self.build_response( + RpcSuccessResponse::LightClientOptimisticUpdate(update) => self.build_response( id, peer_id, Response::LightClientOptimisticUpdate(update), ), - RPCResponse::LightClientFinalityUpdate(update) => self.build_response( + RpcSuccessResponse::LightClientFinalityUpdate(update) => self.build_response( id, peer_id, Response::LightClientFinalityUpdate(update), ), + RpcSuccessResponse::LightClientUpdatesByRange(update) => self.build_response( + id, + peer_id, + Response::LightClientUpdatesByRange(Some(update)), + ), } } Ok(RPCReceived::EndOfStream(id, termination)) => { @@ -1634,6 +1655,9 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::LightClientUpdatesByRange => { + Response::LightClientUpdatesByRange(None) + } }; self.build_response(id, peer_id, response) } @@ -1767,157 +1791,148 @@ impl Network { /* Networking polling */ - /// Poll the p2p networking stack. - /// - /// This will poll the swarm and do maintenance routines. 
- pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { - while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { - let maybe_event = match swarm_event { - SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { - // Handle sub-behaviour events. - BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), - BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), - // Inform the peer manager about discovered peers. - // - // The peer manager will subsequently decide which peers need to be dialed and then dial - // them. - BehaviourEvent::Discovery(DiscoveredPeers { peers }) => { - self.peer_manager_mut().peers_discovered(peers); - None - } - BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), - BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), - BehaviourEvent::Upnp(e) => { - self.inject_upnp_event(e); - None + pub async fn next_event(&mut self) -> NetworkEvent { + loop { + tokio::select! { + // Poll the libp2p `Swarm`. + // This will poll the swarm and do maintenance routines. + Some(event) = self.swarm.next() => { + if let Some(event) = self.parse_swarm_event(event) { + return event; } - #[allow(unreachable_patterns)] - BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, - SwarmEvent::ConnectionEstablished { .. } => None, - SwarmEvent::ConnectionClosed { .. 
} => None, - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - connection_id: _, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); - None + + // perform gossipsub score updates when necessary + _ = self.update_gossipsub_scores.tick() => { + let this = self.swarm.behaviour_mut(); + this.peer_manager.update_gossipsub_scores(&this.gossipsub); } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - connection_id: _, - } => { - let error_repr = match error { - libp2p::swarm::ListenError::Aborted => { - "Incoming connection aborted".to_string() - } - libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { - format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") - } - libp2p::swarm::ListenError::LocalPeerId { endpoint } => { - format!("Dialing local peer id {endpoint:?}") - } - libp2p::swarm::ListenError::Denied { cause } => { - format!("Connection was denied with cause: {cause:?}") + // poll the gossipsub cache to clear expired messages + Some(result) = self.gossip_cache.next() => { + match result { + Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), + Ok(expired_topic) => { + if let Some(v) = metrics::get_int_counter( + &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, + &[expired_topic.kind().as_ref()], + ) { + v.inc() + }; } - libp2p::swarm::ListenError::Transport(t) => match t { - libp2p::TransportError::MultiaddrNotSupported(m) => { - format!("Transport error: Multiaddr not supported: {m}") - } - libp2p::TransportError::Other(e) => { - format!("Transport error: other: {e}") - } - }, - }; - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); - None + } } - SwarmEvent::OutgoingConnectionError { - peer_id: _, - error: _, - connection_id: _, - } => { - // The Behaviour event is more general than the swarm event here. It includes - // connection failures. 
So we use that log for now, in the peer manager - // behaviour implementation. + } + } + } + + fn parse_swarm_event( + &mut self, + event: SwarmEvent>, + ) -> Option> { + match event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + // Handle sub-behaviour events. + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + // Inform the peer manager about discovered peers. + // + // The peer manager will subsequently decide which peers need to be dialed and then dial + // them. + BehaviourEvent::Discovery(DiscoveredPeers { peers }) => { + self.peer_manager_mut().peers_discovered(peers); None } - SwarmEvent::NewListenAddr { address, .. } => { - Some(NetworkEvent::NewListenAddr(address)) - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address); + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::Upnp(e) => { + self.inject_upnp_event(e); None } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - match reason { - Ok(_) => { - debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) + #[allow(unreachable_patterns)] + BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. 
} => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + connection_id: _, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + None + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + connection_id: _, + } => { + let error_repr = match error { + libp2p::swarm::ListenError::Aborted => { + "Incoming connection aborted".to_string() + } + libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { + format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") + } + libp2p::swarm::ListenError::LocalPeerId { endpoint } => { + format!("Dialing local peer id {endpoint:?}") + } + libp2p::swarm::ListenError::Denied { cause } => { + format!("Connection was denied with cause: {cause:?}") + } + libp2p::swarm::ListenError::Transport(t) => match t { + libp2p::TransportError::MultiaddrNotSupported(m) => { + format!("Transport error: Multiaddr not supported: {m}") } - Err(reason) => { - crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) + libp2p::TransportError::Other(e) => { + format!("Transport error: other: {e}") } - }; - if Swarm::listeners(&self.swarm).count() == 0 { - Some(NetworkEvent::ZeroListeners) - } else { - None + }, + }; + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); + None + } + SwarmEvent::OutgoingConnectionError { + peer_id: _, + error: _, + connection_id: _, + } => { + // The Behaviour event is more general than the swarm event here. It includes + // connection failures. So we use that log for now, in the peer manager + // behaviour implementation. + None + } + SwarmEvent::NewListenAddr { address, .. } => Some(NetworkEvent::NewListenAddr(address)), + SwarmEvent::ExpiredListenAddr { address, .. 
} => { + debug!(self.log, "Listen address expired"; "address" => %address); + None + } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + match reason { + Ok(_) => { + debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) } - } - SwarmEvent::ListenerError { error, .. } => { - // Ignore quic accept and close errors. - if let Some(error) = error - .get_ref() - .and_then(|err| err.downcast_ref::()) - .filter(|err| matches!(err, libp2p::quic::Error::Connection(_))) - { - debug!(self.log, "Listener closed quic connection"; "reason" => ?error); - } else { - warn!(self.log, "Listener error"; "error" => ?error); + Err(reason) => { + crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) } + }; + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { None } - _ => { - // NOTE: SwarmEvent is a non exhaustive enum so updates should be based on - // release notes more than compiler feedback - None - } - }; - - if let Some(ev) = maybe_event { - return Poll::Ready(ev); } - } - - // perform gossipsub score updates when necessary - while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - let this = self.swarm.behaviour_mut(); - this.peer_manager.update_gossipsub_scores(&this.gossipsub); - } - - // poll the gossipsub cache to clear expired messages - while let Poll::Ready(Some(result)) = self.gossip_cache.poll_next_unpin(cx) { - match result { - Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), - Ok(expired_topic) => { - if let Some(v) = metrics::get_int_counter( - &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, - &[expired_topic.kind().as_ref()], - ) { - v.inc() - }; - } + SwarmEvent::ListenerError { error, .. 
} => { + debug!(self.log, "Listener closed connection attempt"; "reason" => ?error); + None + } + _ => { + // NOTE: SwarmEvent is a non exhaustive enum so updates should be based on + // release notes more than compiler feedback + None } } - Poll::Pending - } - - pub async fn next_event(&mut self) -> NetworkEvent { - futures::future::poll_fn(|cx| self.poll_network(cx)).await } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 81ee86b8b9b..f4988e68cd5 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -24,8 +24,6 @@ use types::{ }; pub const NETWORK_KEY_FILENAME: &str = "key"; -/// The maximum simultaneous libp2p connections per peer. -pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; /// The filename to store our local metadata. pub const METADATA_FILENAME: &str = "metadata"; diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index f271c9ff722..bcebd02a0ed 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -26,9 +26,9 @@ pub struct NetworkGlobals { pub sync_state: RwLock, /// The current state of the backfill sync. pub backfill_state: RwLock, - /// The computed custody subnets and columns is stored to avoid re-computing. - pub custody_subnets: Vec, - pub custody_columns: Vec, + /// The computed sampling subnets and columns is stored to avoid re-computing. + pub sampling_subnets: Vec, + pub sampling_columns: Vec, /// Network-related configuration. Immutable after initialization. pub config: Arc, /// Ethereum chain configuration. Immutable after initialization. 
@@ -45,24 +45,31 @@ impl NetworkGlobals { config: Arc, spec: Arc, ) -> Self { - let (custody_subnets, custody_columns) = if spec.is_peer_das_scheduled() { + let (sampling_subnets, sampling_columns) = if spec.is_peer_das_scheduled() { + let node_id = enr.node_id().raw(); + let custody_subnet_count = local_metadata .custody_subnet_count() .copied() .expect("custody subnet count must be set if PeerDAS is scheduled"); - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, + + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); + + let sampling_subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id, + subnet_sampling_size, &spec, ) - .expect("custody subnet count must be valid") + .expect("sampling subnet count must be valid") .collect::>(); - let custody_columns = custody_subnets + + let sampling_columns = sampling_subnets .iter() .flat_map(|subnet| subnet.columns::(&spec)) .sorted() .collect(); - (custody_subnets, custody_columns) + + (sampling_subnets, sampling_columns) } else { (vec![], vec![]) }; @@ -76,8 +83,8 @@ impl NetworkGlobals { gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), - custody_subnets, - custody_columns, + sampling_subnets, + sampling_columns, config, spec, } @@ -197,12 +204,13 @@ mod test { use types::{Epoch, EthSpec, MainnetEthSpec as E}; #[test] - fn test_custody_subnets() { + fn test_sampling_subnets() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); let config = Arc::new(NetworkConfig::default()); @@ -213,17 +221,20 @@ mod test { config, Arc::new(spec), ); - 
assert_eq!(globals.custody_subnets.len(), custody_subnet_count as usize); + assert_eq!( + globals.sampling_subnets.len(), + subnet_sampling_size as usize + ); } #[test] - fn test_custody_columns() { + fn test_sampling_columns() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; - let custody_columns_count = spec.number_of_columns / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); let config = Arc::new(NetworkConfig::default()); @@ -234,7 +245,10 @@ mod test { config, Arc::new(spec), ); - assert_eq!(globals.custody_columns.len(), custody_columns_count); + assert_eq!( + globals.sampling_columns.len(), + subnet_sampling_size as usize + ); } fn get_metadata(custody_subnet_count: u64) -> MetaData { diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 1bc99f9a6c4..9f68278e284 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -252,28 +252,25 @@ impl PubsubMessage { Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::BlobSidecar(blob_index) => { - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(ForkName::Deneb | ForkName::Electra) => { + if let Some(fork_name) = + fork_context.from_context_bytes(gossip_topic.fork_digest) + { + if fork_name.deneb_enabled() { let blob_sidecar = Arc::new( BlobSidecar::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ); - Ok(PubsubMessage::BlobSidecar(Box::new(( + return Ok(PubsubMessage::BlobSidecar(Box::new(( *blob_index, blob_sidecar, - )))) + )))); } - Some( - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella, - ) - | None => Err(format!( - "beacon_blobs_and_sidecar topic invalid for given 
fork digest {:?}", - gossip_topic.fork_digest - )), } + + Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )) } GossipKind::DataColumnSidecar(subnet_id) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 8a0416c1f8a..f721c8477cf 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,9 +3,9 @@ mod common; use common::Protocol; -use lighthouse_network::rpc::methods::*; +use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; -use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; +use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; @@ -75,7 +75,7 @@ fn test_tcp_status_rpc() { .await; // Dummy STATUS RPC message - let rpc_request = Request::Status(StatusMessage { + let rpc_request = RequestType::Status(StatusMessage { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), @@ -128,10 +128,10 @@ fn test_tcp_status_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response(peer_id, id, request.id, rpc_response.clone()); } } _ => {} // Ignore other events @@ -177,7 +177,12 @@ fn test_tcp_blocks_by_range_chunked_rpc() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: messages_to_send, + step: 1, 
+ })); // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -247,7 +252,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for i in 0..messages_to_send { @@ -260,10 +265,20 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -309,7 +324,7 @@ fn test_blobs_by_range_chunked_rpc() { .await; // BlobsByRange Request - let rpc_request = Request::BlobsByRange(BlobsByRangeRequest { + let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: 0, count: slot_count, }); @@ -367,16 +382,26 @@ fn test_blobs_by_range_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { // Send first third of responses as base blocks, // second as altair and third as bellatrix. 
- receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlobsByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlobsByRange(None), + ); } } _ => {} // Ignore other events @@ -422,7 +447,12 @@ fn test_tcp_blocks_by_range_over_limit() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let full_block = bellatrix_block_large(&common::fork_context(ForkName::Bellatrix), &spec); @@ -460,15 +490,25 @@ fn test_tcp_blocks_by_range_over_limit() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_bellatrix_large.clone(); - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -514,7 +554,12 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let empty_block = BeaconBlock::empty(&spec); @@ -583,10 +628,10 @@ 
fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { }, _, )) => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + message_info = Some((peer_id, id, request.id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -596,8 +641,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); + let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -642,7 +687,12 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: 10, + step: 1, + })); // BlocksByRange Response let empty_block = BeaconBlock::empty(&spec); @@ -696,15 +746,25 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + 
Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -750,7 +810,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { .await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( vec![ Hash256::zero(), Hash256::zero(), @@ -827,7 +887,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response debug!(log, "Receiver got request"); @@ -840,11 +900,16 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, rpc_response); + receiver.send_response(peer_id, id, request.id, rpc_response); debug!(log, "Sending message"); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); debug!(log, "Send stream term"); } } @@ -888,7 +953,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { .await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( vec![ Hash256::zero(), Hash256::zero(), @@ -971,10 +1036,10 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { }, _, )) => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + message_info = Some((peer_id, id, request.id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -984,8 +1049,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. 
This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); + let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index fed346127f0..500cd23faeb 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,6 +15,7 @@ eth2 = { workspace = true } gossipsub = { workspace = true } eth2_network_config = { workspace = true } kzg = { workspace = true } +bls = { workspace = true } [dependencies] alloy-primitives = { workspace = true } @@ -37,7 +38,7 @@ smallvec = { workspace = true } rand = { workspace = true } fnv = { workspace = true } alloy-rlp = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } igd-next = "0.14" @@ -57,3 +58,4 @@ disable-backfill = [] fork_from_env = ["beacon_chain/fork_from_env"] portable = ["beacon_chain/portable"] test_logger = [] +ci_logger = [] diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 9e42aa8e924..4b7e8a50a36 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -5,11 +5,11 @@ use beacon_chain::{ sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; -pub use lighthouse_metrics::*; use lighthouse_network::{ peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, NetworkGlobals, }; +pub use metrics::*; use std::sync::{Arc, LazyLock}; use strum::IntoEnumIterator; use types::EthSpec; diff --git 
a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 005536bcf25..4d875cb4a14 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,6 +4,7 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; @@ -18,13 +19,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use beacon_chain::{ - blob_verification::{GossipBlobError, GossipVerifiedBlob}, - data_availability_checker::DataColumnsToPublish, -}; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, PubsubMessage, ReportSource, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -171,26 +166,6 @@ impl NetworkBeaconProcessor { }) } - pub(crate) fn handle_data_columns_to_publish( - &self, - data_columns_to_publish: DataColumnsToPublish, - ) { - if let Some(data_columns_to_publish) = data_columns_to_publish { - self.send_network_message(NetworkMessage::Publish { - messages: data_columns_to_publish - .iter() - .map(|d| { - let subnet = DataColumnSubnetId::from_column_index::( - d.index as usize, - &self.chain.spec, - ); - PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) - }) - .collect(), - }); - } - } - /// Send a message on `message_tx` that the `message_id` sent by `peer_id` should be propagated on /// the gossip network. 
/// @@ -711,6 +686,9 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InvalidSubnetId { .. } | GossipDataColumnError::InvalidInclusionProof { .. } | GossipDataColumnError::InvalidKzgProof { .. } + | GossipDataColumnError::UnexpectedDataColumn + | GossipDataColumnError::InvalidColumnIndex(_) + | GossipDataColumnError::InconsistentCommitmentsOrProofLength | GossipDataColumnError::NotFinalizedDescendant { .. } => { debug!( self.log, @@ -1019,9 +997,7 @@ impl NetworkBeaconProcessor { .process_gossip_data_columns(vec![verified_data_column], || Ok(())) .await { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish); - + Ok(availability) => { match availability { AvailabilityProcessingStatus::Imported(block_root) => { // Note: Reusing block imported metric here @@ -1049,7 +1025,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); - // Potentially trigger reconstruction + self.attempt_data_column_reconstruction(block_root).await; } } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7f551c544c7..76f5e886ff2 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -2,27 +2,33 @@ use crate::sync::manager::BlockProcessType; use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, AvailabilityProcessingStatus, BeaconChain, +}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as 
BeaconWorkEvent, }; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + LightClientUpdatesByRangeRequest, }; +use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, - Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, + Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, }; -use slog::{debug, Logger}; +use slog::{debug, error, trace, Logger}; use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use store::MemoryStore; use task_executor::TaskExecutor; +use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::{self, error::TrySendError}; use types::*; @@ -596,13 +602,21 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_range_request(peer_id, request_id, request) + .handle_blocks_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await; }; @@ -616,13 +630,21 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_root_request(peer_id, request_id, request) + .handle_blocks_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await; }; @@ -636,12 +658,21 @@ impl 
NetworkBeaconProcessor { pub fn send_blobs_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_blobs_by_range_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_blobs_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -653,12 +684,21 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_blobs_by_root_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_blobs_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -670,12 +710,21 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_data_columns_by_root_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_data_columns_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -687,12 +736,21 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_range_request( self: &Arc, peer_id: PeerId, - 
request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_data_columns_by_range_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_data_columns_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -704,12 +762,21 @@ impl NetworkBeaconProcessor { pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: LightClientBootstrapRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_light_client_bootstrap(peer_id, request_id, request); + let process_fn = move || { + processor.handle_light_client_bootstrap( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -721,11 +788,19 @@ impl NetworkBeaconProcessor { pub fn send_light_client_optimistic_update_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_light_client_optimistic_update(peer_id, request_id); + let process_fn = move || { + processor.handle_light_client_optimistic_update( + peer_id, + connection_id, + substream_id, + request_id, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -737,10 +812,19 @@ impl NetworkBeaconProcessor { pub fn send_light_client_finality_update_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: 
RequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || processor.handle_light_client_finality_update(peer_id, request_id); + let process_fn = move || { + processor.handle_light_client_finality_update( + peer_id, + connection_id, + substream_id, + request_id, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -748,10 +832,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process a `LightClientUpdatesByRange` request from the RPC network. + pub fn send_light_client_updates_by_range_request( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_light_client_updates_by_range( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::LightClientUpdatesByRangeRequest(Box::new(process_fn)), + }) + } + /// Send a message to `sync_tx`. /// /// Creates a log if there is an internal error. - fn send_sync_message(&self, message: SyncMessage) { + pub(crate) fn send_sync_message(&self, message: SyncMessage) { self.sync_tx.send(message).unwrap_or_else(|e| { debug!(self.log, "Could not send message to the sync service"; "error" => %e) @@ -767,6 +877,75 @@ impl NetworkBeaconProcessor { "error" => %e) }); } + + /// Attempt to reconstruct all data columns if the following conditions satisfies: + /// - Our custody requirement is all columns + /// - We have >= 50% of columns, but not all columns + /// + /// Returns `Some(AvailabilityProcessingStatus)` if reconstruction is successfully performed, + /// otherwise returns `None`. 
+ async fn attempt_data_column_reconstruction( + &self, + block_root: Hash256, + ) -> Option { + let result = self.chain.reconstruct_data_columns(block_root).await; + match result { + Ok(Some((availability_processing_status, data_columns_to_publish))) => { + self.send_network_message(NetworkMessage::Publish { + messages: data_columns_to_publish + .iter() + .map(|d| { + let subnet = DataColumnSubnetId::from_column_index::( + d.index as usize, + &self.chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) + }) + .collect(), + }); + + match &availability_processing_status { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components available via reconstruction"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Block components still missing block after reconstruction"; + "result" => "imported all custody columns", + "block_hash" => %block_root, + ); + } + } + + Some(availability_processing_status) + } + Ok(None) => { + // reason is tracked via the `KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL` metric + trace!( + self.log, + "Reconstruction not required for block"; + "block_hash" => %block_root, + ); + None + } + Err(e) => { + error!( + self.log, + "Error during data column reconstruction"; + "block_root" => %block_root, + "error" => ?e + ); + None + } + } + } } type TestBeaconChainType = @@ -779,6 +958,7 @@ impl NetworkBeaconProcessor> { // processor (but not much else). 
pub fn null_for_testing( network_globals: Arc>, + sync_tx: UnboundedSender>, chain: Arc>>, executor: TaskExecutor, log: Logger, @@ -791,7 +971,6 @@ impl NetworkBeaconProcessor> { } = <_>::default(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); let network_beacon_processor = Self { beacon_processor_send: beacon_processor_tx, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 0c98f5c17e5..c4944078fef 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -2,20 +2,22 @@ use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERA use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; +use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use itertools::process_results; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; +use methods::LightClientUpdatesByRangeRequest; use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use tokio_stream::StreamExt; use types::blob_sidecar::BlobIdentifier; -use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, Slot}; +use types::{Epoch, EthSpec, FixedBytesExtended, Hash256, Slot}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -33,11 +35,14 @@ impl NetworkBeaconProcessor { &self, peer_id: PeerId, response: Response, - id: PeerRequestId, + connection_id: 
ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendResponse { peer_id, - id, + request_id, + id: (connection_id, substream_id), response, }) } @@ -45,15 +50,17 @@ impl NetworkBeaconProcessor { pub fn send_error_response( &self, peer_id: PeerId, - error: RPCResponseErrorCode, + error: RpcErrorResponse, reason: String, id: PeerRequestId, + request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendErrorResponse { peer_id, error, reason, id, + request_id, }) } @@ -131,14 +138,24 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, self.clone() - .handle_blocks_by_root_request_inner(peer_id, request_id, request) + .handle_blocks_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await, Response::BlocksByRoot, ); @@ -148,9 +165,11 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request_inner( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let log_results = |peer_id, requested_blocks, send_block_count| { debug!( self.log, @@ -169,10 +188,7 @@ impl NetworkBeaconProcessor { Ok(block_stream) => block_stream, Err(e) => { error!(self.log, "Error getting block stream"; "error" => ?e); - return Err(( - RPCResponseErrorCode::ServerError, - "Error getting block stream", - )); + return Err((RpcErrorResponse::ServerError, "Error getting block stream")); } }; // Fetching blocks is async because it may have to hit the 
execution layer for payloads. @@ -183,6 +199,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::BlocksByRoot(Some(block.clone())), + connection_id, + substream_id, request_id, ); send_block_count += 1; @@ -204,7 +222,7 @@ impl NetworkBeaconProcessor { ); log_results(peer_id, requested_blocks, send_block_count); return Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Execution layer not synced", )); } @@ -228,13 +246,23 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_blobs_by_root_request_inner(peer_id, request_id, request), + self.handle_blobs_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), Response::BlobsByRoot, ); } @@ -243,9 +271,11 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) else { // No blob ids requested. @@ -263,7 +293,13 @@ impl NetworkBeaconProcessor { for id in request.blob_ids.as_slice() { // First attempt to get the blobs from the RPC cache. 
if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { - self.send_response(peer_id, Response::BlobsByRoot(Some(blob)), request_id); + self.send_response( + peer_id, + Response::BlobsByRoot(Some(blob)), + connection_id, + substream_id, + request_id, + ); send_blob_count += 1; } else { let BlobIdentifier { @@ -285,6 +321,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::BlobsByRoot(Some(blob_sidecar.clone())), + connection_id, + substream_id, request_id, ); send_blob_count += 1; @@ -320,13 +358,23 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_data_columns_by_root_request_inner(peer_id, request_id, request), + self.handle_data_columns_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), Response::DataColumnsByRoot, ); } @@ -335,9 +383,11 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let mut send_data_column_count = 0; for data_column_id in request.data_column_ids.as_slice() { @@ -350,6 +400,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::DataColumnsByRoot(Some(data_column)), + connection_id, + substream_id, request_id, ); } @@ -361,10 +413,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err(( - RPCResponseErrorCode::ServerError, - "Error getting data column", - )); + return 
Err((RpcErrorResponse::ServerError, "Error getting data column")); } } } @@ -380,20 +429,123 @@ impl NetworkBeaconProcessor { Ok(()) } + pub fn handle_light_client_updates_by_range( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) { + self.terminate_response_stream( + peer_id, + connection_id, + substream_id, + request_id, + self.clone() + .handle_light_client_updates_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), + Response::LightClientUpdatesByRange, + ); + } + + /// Handle a `LightClientUpdatesByRange` request from the peer. + pub fn handle_light_client_updates_by_range_request_inner( + self: Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + req: LightClientUpdatesByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!(self.log, "Received LightClientUpdatesByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_period" => req.start_period, + ); + + // Should not send more than max light client updates + let max_request_size: u64 = req.max_requested(); + if req.count > max_request_size { + return Err(( + RpcErrorResponse::InvalidRequest, + "Request exceeded max size", + )); + } + + let lc_updates = match self + .chain + .get_light_client_updates(req.start_period, req.count) + { + Ok(lc_updates) => lc_updates, + Err(e) => { + error!(self.log, "Unable to obtain light client updates"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RpcErrorResponse::ServerError, "Database error")); + } + }; + + for lc_update in lc_updates.iter() { + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::LightClientUpdatesByRange(Some(Arc::new(lc_update.clone()))), + request_id, + id: (connection_id, substream_id), + }); + } + + let lc_updates_sent = 
lc_updates.len(); + + if lc_updates_sent < req.count as usize { + debug!( + self.log, + "LightClientUpdatesByRange outgoing response processed"; + "peer" => %peer_id, + "info" => "Failed to return all requested light client updates. The peer may have requested data ahead of whats currently available", + "start_period" => req.start_period, + "requested" => req.count, + "returned" => lc_updates_sent + ); + } else { + debug!( + self.log, + "LightClientUpdatesByRange outgoing response processed"; + "peer" => %peer_id, + "start_period" => req.start_period, + "requested" => req.count, + "returned" => lc_updates_sent + ); + } + + Ok(()) + } + /// Handle a `LightClientBootstrap` request from the peer. pub fn handle_light_client_bootstrap( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: LightClientBootstrapRequest, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self.chain.get_light_client_bootstrap(&request.root) { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Bootstrap not available".to_string(), )), Err(e) => { @@ -402,10 +554,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - Err(( - RPCResponseErrorCode::ResourceUnavailable, - format!("{:?}", e), - )) + Err((RpcErrorResponse::ResourceUnavailable, format!("{:?}", e))) } }, Response::LightClientBootstrap, @@ -416,10 +565,14 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_optimistic_update( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self .chain @@ -428,7 +581,7 @@ impl NetworkBeaconProcessor { { Some(update) => 
Ok(Arc::new(update)), None => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Latest optimistic update not available".to_string(), )), }, @@ -440,10 +593,14 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_finality_update( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self .chain @@ -452,7 +609,7 @@ impl NetworkBeaconProcessor { { Some(update) => Ok(Arc::new(update)), None => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Latest finality update not available".to_string(), )), }, @@ -464,14 +621,24 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlocksByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, self.clone() - .handle_blocks_by_range_request_inner(peer_id, request_id, req) + .handle_blocks_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ) .await, Response::BlocksByRange, ); @@ -481,9 +648,11 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request_inner( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlocksByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, "count" => req.count(), @@ -495,19 +664,15 @@ impl NetworkBeaconProcessor { self.chain .epoch() .map_or(self.chain.spec.max_request_blocks, |epoch| { - match 
self.chain.spec.fork_name_at_epoch(epoch) { - ForkName::Deneb | ForkName::Electra => { - self.chain.spec.max_request_blocks_deneb - } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella => self.chain.spec.max_request_blocks, + if self.chain.spec.fork_name_at_epoch(epoch).deneb_enabled() { + self.chain.spec.max_request_blocks_deneb + } else { + self.chain.spec.max_request_blocks } }); if *req.count() > max_request_size { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded max size", )); } @@ -517,17 +682,15 @@ impl NetworkBeaconProcessor { .forwards_iter_block_roots(Slot::from(*req.start_slot())) { Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -535,7 +698,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -566,7 +729,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Iteration error")); + return Err((RpcErrorResponse::ServerError, "Iteration error")); } }; @@ -607,7 +770,7 @@ impl NetworkBeaconProcessor { Ok(block_stream) => block_stream, Err(e) => { error!(self.log, "Error getting block stream"; "error" => ?e); - return Err((RPCResponseErrorCode::ServerError, "Iterator error")); + return Err((RpcErrorResponse::ServerError, 
"Iterator error")); } }; @@ -624,8 +787,9 @@ impl NetworkBeaconProcessor { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: Response::BlocksByRange(Some(block.clone())), - id: request_id, + id: (connection_id, substream_id), }); } } @@ -638,7 +802,7 @@ impl NetworkBeaconProcessor { "request_root" => ?root ); log_results(req, peer_id, blocks_sent); - return Err((RPCResponseErrorCode::ServerError, "Database inconsistency")); + return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { debug!( @@ -650,7 +814,7 @@ impl NetworkBeaconProcessor { log_results(req, peer_id, blocks_sent); // send the stream terminator return Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Execution layer not synced", )); } @@ -677,7 +841,7 @@ impl NetworkBeaconProcessor { } log_results(req, peer_id, blocks_sent); // send the stream terminator - return Err((RPCResponseErrorCode::ServerError, "Failed fetching blocks")); + return Err((RpcErrorResponse::ServerError, "Failed fetching blocks")); } } } @@ -690,13 +854,23 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_range_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlobsByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_blobs_by_range_request_inner(peer_id, request_id, req), + self.handle_blobs_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ), Response::BlobsByRange, ); } @@ -705,9 +879,11 @@ impl NetworkBeaconProcessor { fn handle_blobs_by_range_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlobsByRangeRequest, - ) -> 
Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received BlobsByRange Request"; "peer_id" => %peer_id, "count" => req.count, @@ -717,7 +893,7 @@ impl NetworkBeaconProcessor { // Should not send more than max request blocks if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", )); } @@ -728,10 +904,7 @@ impl NetworkBeaconProcessor { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { debug!(self.log, "Deneb fork is disabled"); - return Err(( - RPCResponseErrorCode::InvalidRequest, - "Deneb fork is disabled", - )); + return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -752,12 +925,12 @@ impl NetworkBeaconProcessor { return if data_availability_boundary_slot < oldest_blob_slot { Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "blobs pruned within boundary", )) } else { Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Req outside availability period", )) }; @@ -766,17 +939,15 @@ impl NetworkBeaconProcessor { let forwards_block_root_iter = match self.chain.forwards_iter_block_roots(request_start_slot) { Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -784,7 +955,7 @@ impl 
NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -821,7 +992,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -854,7 +1025,8 @@ impl NetworkBeaconProcessor { self.send_network_message(NetworkMessage::SendResponse { peer_id, response: Response::BlobsByRange(Some(blob_sidecar.clone())), - id: request_id, + request_id, + id: (connection_id, substream_id), }); } } @@ -870,7 +1042,7 @@ impl NetworkBeaconProcessor { log_results(peer_id, req, blobs_sent); return Err(( - RPCResponseErrorCode::ServerError, + RpcErrorResponse::ServerError, "No blobs and failed fetching corresponding block", )); } @@ -885,13 +1057,23 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: DataColumnsByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_data_columns_by_range_request_inner(peer_id, request_id, req), + self.handle_data_columns_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ), Response::DataColumnsByRange, ); } @@ -900,9 +1082,11 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: DataColumnsByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received DataColumnsByRange Request"; "peer_id" => %peer_id, "count" => req.count, @@ 
-912,7 +1096,7 @@ impl NetworkBeaconProcessor { // Should not send more than max request data columns if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", )); } @@ -923,10 +1107,7 @@ impl NetworkBeaconProcessor { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { debug!(self.log, "Deneb fork is disabled"); - return Err(( - RPCResponseErrorCode::InvalidRequest, - "Deneb fork is disabled", - )); + return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -948,12 +1129,12 @@ impl NetworkBeaconProcessor { return if data_availability_boundary_slot < oldest_data_column_slot { Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "blobs pruned within boundary", )) } else { Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Req outside availability period", )) }; @@ -962,17 +1143,15 @@ impl NetworkBeaconProcessor { let forwards_block_root_iter = match self.chain.forwards_iter_block_roots(request_start_slot) { Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -980,7 +1159,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } 
}; @@ -1017,7 +1196,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -1032,10 +1211,11 @@ impl NetworkBeaconProcessor { data_columns_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: Response::DataColumnsByRange(Some( data_column_sidecar.clone(), )), - id: request_id, + id: (connection_id, substream_id), }); } Ok(None) => {} // no-op @@ -1049,7 +1229,7 @@ impl NetworkBeaconProcessor { "error" => ?e ); return Err(( - RPCResponseErrorCode::ServerError, + RpcErrorResponse::ServerError, "No data columns and failed fetching corresponding block", )); } @@ -1080,8 +1260,10 @@ impl NetworkBeaconProcessor { fn terminate_response_single_item Response>( &self, peer_id: PeerId, - request_id: PeerRequestId, - result: Result, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + result: Result, into_response: F, ) { match result { @@ -1091,12 +1273,19 @@ impl NetworkBeaconProcessor { // https://github.com/sigp/lighthouse/blob/3058b96f2560f1da04ada4f9d8ba8e5651794ff6/beacon_node/lighthouse_network/src/rpc/handler.rs#L555-L558 self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: into_response(resp), - id: request_id, + id: (connection_id, substream_id), }); } Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason, request_id); + self.send_error_response( + peer_id, + error_code, + reason, + (connection_id, substream_id), + request_id, + ); } } } @@ -1106,18 +1295,27 @@ impl NetworkBeaconProcessor { fn terminate_response_stream) -> Response>( &self, peer_id: PeerId, - request_id: PeerRequestId, - result: Result<(), (RPCResponseErrorCode, &'static str)>, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + result: Result<(), 
(RpcErrorResponse, &'static str)>, into_response: F, ) { match result { Ok(_) => self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: into_response(None), - id: request_id, + id: (connection_id, substream_id), }), Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason.into(), request_id); + self.send_error_response( + peer_id, + error_code, + reason.into(), + (connection_id, substream_id), + request_id, + ); } } } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index dcad6160b34..d86dfae63a4 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -10,8 +10,8 @@ use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; use beacon_chain::data_column_verification::verify_kzg_for_data_column_list; use beacon_chain::{ - validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainError, - BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, + validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainTypes, + BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use beacon_processor::{ work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, @@ -327,34 +327,37 @@ impl NetworkBeaconProcessor { _seen_timestamp: Duration, process_type: BlockProcessType, ) { - let result = self + let mut result = self .chain .process_rpc_custody_columns(custody_columns) .await; match &result { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish.clone()); - - match availability { - AvailabilityProcessingStatus::Imported(hash) => { - debug!( - self.log, - "Block components retrieved"; - 
"result" => "imported block and custody columns", - "block_hash" => %hash, - ); - self.chain.recompute_head_at_current_slot().await; - } - AvailabilityProcessingStatus::MissingComponents(_, _) => { - debug!( - self.log, - "Missing components over rpc"; - "block_hash" => %block_root, - ); + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + // Attempt reconstruction here before notifying sync, to avoid sending out more requests + // that we may no longer need. + if let Some(availability) = + self.attempt_data_column_reconstruction(block_root).await + { + result = Ok(availability) } } - } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -374,7 +377,7 @@ impl NetworkBeaconProcessor { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: result.map(|(r, _)| r).into(), + result: result.into(), }); } @@ -603,103 +606,75 @@ impl NetworkBeaconProcessor { ); (imported_blocks, Ok(())) } - Err(error) => { + Err(e) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_FAILED_TOTAL, ); - let err = match error { - // Handle the historical block errors specifically - BeaconChainError::HistoricalBlockError(e) => match e { - HistoricalBlockError::MismatchedBlockRoot { - block_root, - expected_block_root, - } => { - debug!( - self.log, - "Backfill batch processing error"; - "error" => "mismatched_block_root", - "block_root" => ?block_root, - "expected_root" => ?expected_block_root - ); - - ChainSegmentFailed { - message: String::from("mismatched_block_root"), - // The peer is faulty if they send blocks with bad roots. 
- peer_action: Some(PeerAction::LowToleranceError), - } - } - HistoricalBlockError::InvalidSignature - | HistoricalBlockError::SignatureSet(_) => { - warn!( - self.log, - "Backfill batch processing error"; - "error" => ?e - ); - - ChainSegmentFailed { - message: "invalid_signature".into(), - // The peer is faulty if they bad signatures. - peer_action: Some(PeerAction::LowToleranceError), - } - } - HistoricalBlockError::ValidatorPubkeyCacheTimeout => { - warn!( - self.log, - "Backfill batch processing error"; - "error" => "pubkey_cache_timeout" - ); - - ChainSegmentFailed { - message: "pubkey_cache_timeout".into(), - // This is an internal error, do not penalize the peer. - peer_action: None, - } - } - HistoricalBlockError::NoAnchorInfo => { - warn!(self.log, "Backfill not required"); - - ChainSegmentFailed { - message: String::from("no_anchor_info"), - // There is no need to do a historical sync, this is not a fault of - // the peer. - peer_action: None, - } - } - HistoricalBlockError::IndexOutOfBounds => { - error!( - self.log, - "Backfill batch OOB error"; - "error" => ?e, - ); - ChainSegmentFailed { - message: String::from("logic_error"), - // This should never occur, don't penalize the peer. - peer_action: None, - } - } - HistoricalBlockError::BlockOutOfRange { .. } => { - error!( - self.log, - "Backfill batch error"; - "error" => ?e, - ); - ChainSegmentFailed { - message: String::from("unexpected_error"), - // This should never occur, don't penalize the peer. - peer_action: None, - } - } - }, - other => { - warn!(self.log, "Backfill batch processing error"; "error" => ?other); - ChainSegmentFailed { - message: format!("{:?}", other), - // This is an internal error, don't penalize the peer. 
- peer_action: None, - } + let peer_action = match &e { + HistoricalBlockError::MismatchedBlockRoot { + block_root, + expected_block_root, + } => { + debug!( + self.log, + "Backfill batch processing error"; + "error" => "mismatched_block_root", + "block_root" => ?block_root, + "expected_root" => ?expected_block_root + ); + // The peer is faulty if they send blocks with bad roots. + Some(PeerAction::LowToleranceError) + } + HistoricalBlockError::InvalidSignature + | HistoricalBlockError::SignatureSet(_) => { + warn!( + self.log, + "Backfill batch processing error"; + "error" => ?e + ); + // The peer is faulty if they bad signatures. + Some(PeerAction::LowToleranceError) + } + HistoricalBlockError::ValidatorPubkeyCacheTimeout => { + warn!( + self.log, + "Backfill batch processing error"; + "error" => "pubkey_cache_timeout" + ); + // This is an internal error, do not penalize the peer. + None } + HistoricalBlockError::NoAnchorInfo => { + warn!(self.log, "Backfill not required"); + // There is no need to do a historical sync, this is not a fault of + // the peer. + None + } + HistoricalBlockError::IndexOutOfBounds => { + error!( + self.log, + "Backfill batch OOB error"; + "error" => ?e, + ); + // This should never occur, don't penalize the peer. + None + } + HistoricalBlockError::StoreError(e) => { + warn!(self.log, "Backfill batch processing error"; "error" => ?e); + // This is an internal error, don't penalize the peer. + None + } // + // Do not use a fallback match, handle all errors explicitly }; - (0, Err(err)) + let err_str: &'static str = e.into(); + ( + 0, + Err(ChainSegmentFailed { + message: format!("{:?}", err_str), + // This is an internal error, don't penalize the peer. 
+ peer_action, + }), + ) } } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 6e8f151a05c..9d774d97c15 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -16,7 +16,7 @@ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::BlobsByRangeRequest; -use lighthouse_network::rpc::SubstreamId; +use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, @@ -360,7 +360,9 @@ impl TestRig { self.network_beacon_processor .send_blobs_by_range_request( PeerId::random(), - (ConnectionId::new_unchecked(42), SubstreamId::new(24)), + ConnectionId::new_unchecked(42), + SubstreamId::new(24), + RequestId::new_unchecked(0), BlobsByRangeRequest { start_slot: 0, count, @@ -1137,6 +1139,7 @@ async fn test_blobs_by_range() { peer_id: _, response: Response::BlobsByRange(blob), id: _, + request_id: _, } = next { if blob.is_some() { diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 26c1d14f020..e1badfda9d5 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -15,10 +15,12 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, }; use futures::prelude::*; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::*; use lighthouse_network::{ + rpc, service::api_types::{AppRequestId, SyncRequestId}, - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, + MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Response, }; use logging::TimeLatch; use slog::{crit, debug, o, trace}; @@ -56,7 +58,7 @@ pub enum 
RouterMessage { RPCRequestReceived { peer_id: PeerId, id: PeerRequestId, - request: Request, + request: rpc::Request, }, /// An RPC response has been received. RPCResponseReceived { @@ -191,51 +193,136 @@ impl Router { /* RPC - Related functionality */ /// A new RPC request has been received from the network. - fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: PeerRequestId, request: Request) { + fn handle_rpc_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + rpc_request: rpc::Request, + ) { if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); + debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); return; } - match request { - Request::Status(status_message) => { - self.on_status_request(peer_id, request_id, status_message) - } - Request::BlocksByRange(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blocks_by_range_request(peer_id, request_id, request), + match rpc_request.r#type { + RequestType::Status(status_message) => self.on_status_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + status_message, ), - Request::BlocksByRoot(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blocks_by_roots_request(peer_id, request_id, request), + RequestType::BlocksByRange(request) => { + // return just one block in case the step parameter is used. 
https://github.com/ethereum/consensus-specs/pull/2856 + let mut count = *request.count(); + if *request.step() > 1 { + count = 1; + } + let blocks_request = match request { + methods::OldBlocksByRangeRequest::V1(req) => { + BlocksByRangeRequest::new_v1(req.start_slot, count) + } + methods::OldBlocksByRangeRequest::V2(req) => { + BlocksByRangeRequest::new(req.start_slot, count) + } + }; + + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blocks_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + blocks_request, + ), + ) + } + RequestType::BlocksByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blocks_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlobsByRange(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blobs_by_range_request(peer_id, request_id, request), + RequestType::BlobsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blobs_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlobsByRoot(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blobs_by_roots_request(peer_id, request_id, request), + RequestType::BlobsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blobs_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( + RequestType::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_roots_request(peer_id, request_id, request), + .send_data_columns_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - 
Request::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( + RequestType::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_range_request(peer_id, request_id, request), + .send_data_columns_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( + RequestType::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_bootstrap_request(peer_id, request_id, request), + .send_light_client_bootstrap_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( + RequestType::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_optimistic_update_request(peer_id, request_id), + .send_light_client_optimistic_update_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + ), ), - Request::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( + RequestType::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_finality_update_request(peer_id, request_id), + .send_light_client_finality_update_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + ), ), + RequestType::LightClientUpdatesByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_light_client_updates_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), + ), + _ => {} } } @@ -275,7 +362,8 @@ impl Router { // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) - | 
Response::LightClientFinalityUpdate(_) => unreachable!(), + | Response::LightClientFinalityUpdate(_) + | Response::LightClientUpdatesByRange(_) => unreachable!(), } } @@ -461,7 +549,7 @@ impl Router { let status_message = status_message(&self.chain); debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); self.network - .send_processor_request(peer_id, Request::Status(status_message)); + .send_processor_request(peer_id, RequestType::Status(status_message)); } fn send_to_sync(&mut self, message: SyncMessage) { @@ -493,7 +581,9 @@ impl Router { pub fn on_status_request( &mut self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, status: StatusMessage, ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); @@ -502,6 +592,7 @@ impl Router { self.network.send_response( peer_id, Response::Status(status_message(&self.chain)), + (connection_id, substream_id), request_id, ); @@ -745,7 +836,7 @@ impl HandlerNetworkContext { } /// Sends a request to the network task. - pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { + pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, request_id: AppRequestId::Router, @@ -754,8 +845,15 @@ impl HandlerNetworkContext { } /// Sends a response to the network task. 
- pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { + pub fn send_response( + &mut self, + peer_id: PeerId, + response: Response, + id: PeerRequestId, + request_id: RequestId, + ) { self.inform_network(NetworkMessage::SendResponse { + request_id, peer_id, id, response, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 150402a7ab2..5a66cb7f30d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -14,12 +14,13 @@ use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; +use lighthouse_network::rpc::{RequestId, RequestType}; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ - rpc::{GoodbyeReason, RPCResponseErrorCode}, - Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, + rpc::{GoodbyeReason, RpcErrorResponse}, + Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Response, Subnet, }; use lighthouse_network::{ service::api_types::AppRequestId, @@ -61,19 +62,21 @@ pub enum NetworkMessage { /// Send an RPC request to the libp2p service. SendRequest { peer_id: PeerId, - request: Request, + request: RequestType, request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. SendResponse { peer_id: PeerId, + request_id: RequestId, response: Response, id: PeerRequestId, }, /// Sends an error response to an RPC request. 
SendErrorResponse { peer_id: PeerId, - error: RPCResponseErrorCode, + request_id: RequestId, + error: RpcErrorResponse, reason: String, id: PeerRequestId, }, @@ -623,16 +626,19 @@ impl NetworkService { peer_id, response, id, + request_id, } => { - self.libp2p.send_response(peer_id, id, response); + self.libp2p.send_response(peer_id, id, request_id, response); } NetworkMessage::SendErrorResponse { peer_id, error, id, + request_id, reason, } => { - self.libp2p.send_error_response(peer_id, id, error, reason); + self.libp2p + .send_error_response(peer_id, id, request_id, error, reason); } NetworkMessage::ValidationResult { propagation_source, @@ -807,7 +813,7 @@ impl NetworkService { } } } else { - for column_subnet in &self.network_globals.custody_subnets { + for column_subnet in &self.network_globals.sampling_subnets { for fork_digest in self.required_gossip_fork_digests() { let gossip_kind = Subnet::DataColumn(*column_subnet).into(); let topic = diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index c7c043f53f8..5e336d9c38e 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::{DataColumnSidecarList, SignedBeaconBlock}; -use super::single_block_lookup::DownloadResult; +use super::single_block_lookup::{ComponentRequests, DownloadResult}; use super::SingleLookupId; #[derive(Debug, Copy, Clone)] @@ -42,7 +42,7 @@ pub trait RequestState { &self, id: Id, peer_id: PeerId, - downloaded_block: Option>>, + expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result; @@ -61,7 +61,7 @@ pub trait RequestState { fn response_type() -> ResponseType; /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. 
- fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str>; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. fn get_state(&self) -> &SingleLookupRequestState; @@ -77,7 +77,7 @@ impl RequestState for BlockRequestState { &self, id: SingleLookupId, peer_id: PeerId, - _: Option>>, + _: usize, cx: &mut SyncNetworkContext, ) -> Result { cx.block_lookup_request(id, peer_id, self.requested_block_root) @@ -107,8 +107,8 @@ impl RequestState for BlockRequestState { fn response_type() -> ResponseType { ResponseType::Block } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.block_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + Ok(&mut request.block_request_state) } fn get_state(&self) -> &SingleLookupRequestState { &self.state @@ -125,10 +125,10 @@ impl RequestState for BlobRequestState { &self, id: Id, peer_id: PeerId, - downloaded_block: Option>>, + expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.blob_lookup_request(id, peer_id, self.block_root, downloaded_block) + cx.blob_lookup_request(id, peer_id, self.block_root, expected_blobs) .map_err(LookupRequestError::SendFailedNetwork) } @@ -150,8 +150,13 @@ impl RequestState for BlobRequestState { fn response_type() -> ResponseType { ResponseType::Blob } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.blob_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::WaitingForBlock => Err("waiting for block"), + ComponentRequests::ActiveBlobRequest(request, _) => Ok(request), + ComponentRequests::ActiveCustodyRequest { .. } => Err("expecting custody request"), + ComponentRequests::NotNeeded { .. 
} => Err("not needed"), + } } fn get_state(&self) -> &SingleLookupRequestState { &self.state @@ -169,10 +174,10 @@ impl RequestState for CustodyRequestState { id: Id, // TODO(das): consider selecting peers that have custody but are in this set _peer_id: PeerId, - downloaded_block: Option>>, + _: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.custody_lookup_request(id, self.block_root, downloaded_block) + cx.custody_lookup_request(id, self.block_root) .map_err(LookupRequestError::SendFailedNetwork) } @@ -200,8 +205,13 @@ impl RequestState for CustodyRequestState { fn response_type() -> ResponseType { ResponseType::CustodyColumn } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.custody_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::WaitingForBlock => Err("waiting for block"), + ComponentRequests::ActiveBlobRequest { .. } => Err("expecting blob request"), + ComponentRequests::ActiveCustodyRequest(request) => Ok(request), + ComponentRequests::NotNeeded { .. 
} => Err("not needed"), + } } fn get_state(&self) -> &SingleLookupRequestState { &self.state diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a9dbf11fd06..5a11bca4814 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -28,6 +28,7 @@ use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; +use crate::sync::SyncMessage; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_availability_checker::{ AvailabilityCheckError, AvailabilityCheckErrorCategory, @@ -49,13 +50,14 @@ use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; pub mod parent_chain; mod single_block_lookup; -#[cfg(test)] -mod tests; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth /// is further back than the most recent head slot. -pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +/// +/// Have the same value as range's sync tolerance to consider a peer synced. Once sync lookup +/// reaches the maximum depth it will force trigger range sync. +pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; @@ -254,22 +256,59 @@ impl BlockLookups { // blocks on top of A forming A -> C. The malicious peer forces us to fetch C // from it, which will result in parent A hitting the chain_too_long error. Then // the valid chain A -> B is dropped too. 
- if let Ok(block_to_drop) = find_oldest_fork_ancestor(parent_chains, chain_idx) { - // Drop all lookups descending from the child of the too long parent chain - if let Some((lookup_id, lookup)) = self + // + // `find_oldest_fork_ancestor` should never return Err, unwrapping to tip for + // completeness + let parent_chain_tip = parent_chain.tip; + let block_to_drop = + find_oldest_fork_ancestor(parent_chains, chain_idx).unwrap_or(parent_chain_tip); + // Drop all lookups descending from the child of the too long parent chain + if let Some((lookup_id, lookup)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == block_to_drop) + { + // If a lookup chain is too long, we can't distinguish a valid chain from a + // malicious one. We must attempt to sync this chain to not lose liveness. If + // the chain grows too long, we stop lookup sync and transition this head to + // forward range sync. We need to tell range sync which head to sync to, and + // from which peers. The lookup of the very tip of this chain may contain zero + // peers if it's the parent-child lookup. So we do a bit of a trick here: + // - Tell range sync to sync to the tip's root (if available, else its ancestor) + // - But use all peers in the ancestor lookup, which should have at least one + // peer, and its peer set is a strict superset of the tip's lookup. 
+ if let Some((_, tip_lookup)) = self .single_block_lookups .iter() - .find(|(_, l)| l.block_root() == block_to_drop) + .find(|(_, l)| l.block_root() == parent_chain_tip) { - for &peer_id in lookup.all_peers() { - cx.report_peer( - peer_id, - PeerAction::LowToleranceError, - "chain_too_long", - ); - } - self.drop_lookup_and_children(*lookup_id); + cx.send_sync_message(SyncMessage::AddPeersForceRangeSync { + peers: lookup.all_peers().copied().collect(), + head_slot: tip_lookup.peek_downloaded_block_slot(), + head_root: parent_chain_tip, + }); + } else { + // Should never happen, log error and continue the lookup drop + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Parent chain tip lookup not found", + "block_root" => ?parent_chain_tip + ); } + + // Do not downscore peers here. Because we can't distinguish a valid chain from + // a malicious one we may penalize honest peers for attempting to discover us a + // valid chain. Until blocks_by_range allows to specify a tip, for example with + // https://github.com/ethereum/consensus-specs/pull/3845 we will have poor + // attributability. A peer can send us garbage blocks over blocks_by_root, and + // then correct blocks via blocks_by_range. + + self.drop_lookup_and_children(*lookup_id); + } else { + // Should never happen + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Block to drop lookup not found", + "block_root" => ?block_to_drop + ); } return false; @@ -409,7 +448,9 @@ impl BlockLookups { }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(lookup).get_state_mut(); + let request_state = R::request_state_mut(lookup) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? 
+ .get_state_mut(); match response { Ok((response, peer_group, seen_timestamp)) => { @@ -504,7 +545,9 @@ impl BlockLookups { }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(lookup).get_state_mut(); + let request_state = R::request_state_mut(lookup) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? + .get_state_mut(); debug!( self.log, diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 73ffcd43845..d701cbbb8d3 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -4,7 +4,7 @@ use crate::sync::network_context::{ LookupRequestResult, PeerGroup, ReqId, RpcRequestSendError, SendErrorProcessor, SyncNetworkContext, }; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChainTypes, BlockProcessStatus}; use derivative::Derivative; use lighthouse_network::service::api_types::Id; use rand::seq::IteratorRandom; @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock}; +use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -62,8 +62,7 @@ pub enum LookupRequestError { pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, - pub blob_request_state: BlobRequestState, - pub custody_request_state: CustodyRequestState, + pub component_requests: ComponentRequests, /// Peers that claim to have imported this set of block components #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] peers: HashSet, @@ -72,6 +71,16 @@ pub struct SingleBlockLookup { created: Instant, } +#[derive(Debug)] +pub(crate) enum 
ComponentRequests { + WaitingForBlock, + ActiveBlobRequest(BlobRequestState, usize), + ActiveCustodyRequest(CustodyRequestState), + // When printing in debug this state display the reason why it's not needed + #[allow(dead_code)] + NotNeeded(&'static str), +} + impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, @@ -82,8 +91,7 @@ impl SingleBlockLookup { Self { id, block_request_state: BlockRequestState::new(requested_block_root), - blob_request_state: BlobRequestState::new(requested_block_root), - custody_request_state: CustodyRequestState::new(requested_block_root), + component_requests: ComponentRequests::WaitingForBlock, peers: HashSet::from_iter(peers.iter().copied()), block_root: requested_block_root, awaiting_parent, @@ -91,6 +99,14 @@ impl SingleBlockLookup { } } + /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` + pub fn peek_downloaded_block_slot(&self) -> Option { + self.block_request_state + .state + .peek_downloaded_data() + .map(|block| block.slot()) + } + /// Get the block root that is being requested. pub fn block_root(&self) -> Hash256 { self.block_root @@ -142,16 +158,28 @@ impl SingleBlockLookup { /// Returns true if the block has already been downloaded. pub fn all_components_processed(&self) -> bool { self.block_request_state.state.is_processed() - && self.blob_request_state.state.is_processed() - && self.custody_request_state.state.is_processed() + && match &self.component_requests { + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::NotNeeded { .. 
} => true, + } } /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() - || self.blob_request_state.state.is_awaiting_event() - || self.custody_request_state.state.is_awaiting_event() + || match &self.component_requests { + ComponentRequests::WaitingForBlock => true, + ComponentRequests::ActiveBlobRequest(request, _) => { + request.state.is_awaiting_event() + } + ComponentRequests::ActiveCustodyRequest(request) => { + request.state.is_awaiting_event() + } + ComponentRequests::NotNeeded { .. } => false, + } } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -161,9 +189,66 @@ impl SingleBlockLookup { cx: &mut SyncNetworkContext, ) -> Result { // TODO: Check what's necessary to download, specially for blobs - self.continue_request::>(cx)?; - self.continue_request::>(cx)?; - self.continue_request::>(cx)?; + self.continue_request::>(cx, 0)?; + + if let ComponentRequests::WaitingForBlock = self.component_requests { + let downloaded_block = self + .block_request_state + .state + .peek_downloaded_data() + .cloned(); + + if let Some(block) = downloaded_block.or_else(|| { + // If the block is already being processed or fully validated, retrieve how many blobs + // it expects. Consider any stage of the block. If the block root has been validated, we + // can assert that this is the correct value of `blob_kzg_commitments_count`. 
+ match cx.chain.get_block_process_status(&self.block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), + } + }) { + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + if expected_blobs == 0 { + self.component_requests = ComponentRequests::NotNeeded("no data"); + } + if cx.chain.should_fetch_blobs(block_epoch) { + self.component_requests = ComponentRequests::ActiveBlobRequest( + BlobRequestState::new(self.block_root), + expected_blobs, + ); + } else if cx.chain.should_fetch_custody_columns(block_epoch) { + self.component_requests = ComponentRequests::ActiveCustodyRequest( + CustodyRequestState::new(self.block_root), + ); + } else { + self.component_requests = ComponentRequests::NotNeeded("outside da window"); + } + } else { + // Wait to download the block before downloading blobs. Then we can be sure that the + // block has data, so there's no need to do "blind" requests for all possible blobs and + // later handle the case where if the peer sent no blobs, penalize. + // + // Lookup sync event safety: Reaching this code means that a block is not in any pre-import + // cache nor in the request state of this lookup. Therefore, the block must either: (1) not + // be downloaded yet or (2) the block is already imported into the fork-choice. + // In case (1) the lookup must either successfully download the block or get dropped. + // In case (2) the block will be downloaded, processed, reach `DuplicateFullyImported` + // and get dropped as completed. + } + } + + match &self.component_requests { + ComponentRequests::WaitingForBlock => {} // do nothing + ComponentRequests::ActiveBlobRequest(_, expected_blobs) => { + self.continue_request::>(cx, *expected_blobs)? + } + ComponentRequests::ActiveCustodyRequest(_) => { + self.continue_request::>(cx, 0)? + } + ComponentRequests::NotNeeded { .. 
} => {} // do nothing + } // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. @@ -179,15 +264,12 @@ impl SingleBlockLookup { fn continue_request>( &mut self, cx: &mut SyncNetworkContext, + expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; let awaiting_parent = self.awaiting_parent.is_some(); - let downloaded_block = self - .block_request_state - .state - .peek_downloaded_data() - .cloned(); - let request = R::request_state_mut(self); + let request = + R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; // Attempt to progress awaiting downloads if request.get_state().is_awaiting_download() { @@ -206,13 +288,16 @@ impl SingleBlockLookup { // not receive any new peers for some time it will be dropped. If it receives a new // peer it must attempt to make progress. R::request_state_mut(self) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? .get_state_mut() .update_awaiting_download_status("no peers"); return Ok(()); }; - let request = R::request_state_mut(self); - match request.make_request(id, peer_id, downloaded_block, cx)? { + let request = R::request_state_mut(self) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))?; + + match request.make_request(id, peer_id, expected_blobs, cx)? 
{ LookupRequestResult::RequestSent(req_id) => { // Lookup sync event safety: If make_request returns `RequestSent`, we are // guaranteed that `BlockLookups::on_download_response` will be called exactly diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f1417804849..344e91711c4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,9 +38,9 @@ use super::block_lookups::BlockLookups; use super::network_context::{ BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, SyncNetworkContext, }; +use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use super::sampling::{Sampling, SamplingConfig, SamplingResult}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; @@ -71,6 +71,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +#[cfg(test)] +use types::ColumnIndex; + /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. @@ -91,6 +94,15 @@ pub enum SyncMessage { /// A useful peer has been discovered. AddPeer(PeerId, SyncInfo), + /// Force trigger range sync for a set of peers given a head they claim to have imported. Used + /// by block lookup to trigger range sync if a parent chain grows too large. + AddPeersForceRangeSync { + peers: Vec, + head_root: Hash256, + /// Sync lookup may not know the Slot of this head. However this situation is very rare. + head_slot: Option, + }, + /// A block has been received from the RPC. 
RpcBlock { request_id: SyncRequestId, @@ -319,6 +331,13 @@ impl SyncManager { .collect() } + #[cfg(test)] + pub(crate) fn get_range_sync_chains( + &self, + ) -> Result, &'static str> { + self.range_sync.state() + } + #[cfg(test)] pub(crate) fn get_failed_chains(&mut self) -> Vec { self.block_lookups.get_failed_chains() @@ -334,6 +353,15 @@ impl SyncManager { self.sampling.active_sampling_requests() } + #[cfg(test)] + pub(crate) fn get_sampling_request_status( + &self, + block_root: Hash256, + index: &ColumnIndex, + ) -> Option { + self.sampling.get_request_status(block_root, index) + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } @@ -362,11 +390,30 @@ impl SyncManager { let sync_type = remote_sync_type(&local, &remote, &self.chain); // update the state of the peer. - let should_add = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); - - if matches!(sync_type, PeerSyncType::Advanced) && should_add { - self.range_sync - .add_peer(&mut self.network, local, peer_id, remote); + let is_still_connected = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); + if is_still_connected { + match sync_type { + PeerSyncType::Behind => {} // Do nothing + PeerSyncType::Advanced => { + self.range_sync + .add_peer(&mut self.network, local, peer_id, remote); + } + PeerSyncType::FullySynced => { + // Sync considers this peer close enough to the head to not trigger range sync. + // Range sync handles well syncing large ranges of blocks, of at least a few blocks. + // However this peer may be in a fork that we should sync but we have not discovered + // yet. If the head of the peer is unknown, attempt block lookup first. If the + // unknown head turns out to be on a longer fork, it will trigger range sync. + // + // A peer should always be considered `Advanced` if its finalized root is
+ // + // TODO: This fork-choice check is potentially duplicated, review code + if !self.chain.block_is_known_to_fork_choice(&remote.head_root) { + self.handle_unknown_block_root(peer_id, remote.head_root); + } + } + } } self.update_sync_state(); @@ -377,6 +424,44 @@ impl SyncManager { } } + /// Trigger range sync for a set of peers that claim to have imported a head unknown to us. + fn add_peers_force_range_sync( + &mut self, + peers: &[PeerId], + head_root: Hash256, + head_slot: Option, + ) { + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, + }; + + let head_slot = head_slot.unwrap_or_else(|| { + debug!(self.log, + "On add peers force range sync assuming local head_slot"; + "local_head_slot" => local.head_slot, + "head_root" => ?head_root + ); + local.head_slot + }); + + let remote = SyncInfo { + head_slot, + head_root, + // Set finalized to same as local to trigger Head sync + finalized_epoch: local.finalized_epoch, + finalized_root: local.finalized_root, + }; + + for peer_id in peers { + self.range_sync + .add_peer(&mut self.network, local.clone(), *peer_id, remote.clone()); + } + } + /// Handles RPC errors related to requests that were emitted from the sync manager. 
fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); @@ -387,13 +472,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } - SyncRequestId::DataColumnsByRoot(req_id, requester) => self - .on_data_columns_by_root_response( - req_id, - requester, - peer_id, - RpcEvent::RPCError(error), - ), + SyncRequestId::DataColumnsByRoot(req_id) => { + self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -462,8 +543,7 @@ impl SyncManager { } /// Updates the syncing state of a peer. - /// Return whether the peer should be used for range syncing or not, according to its - /// connection status. + /// Return true if the peer is still connected and known to the peers DB fn update_peer_sync_state( &mut self, peer_id: &PeerId, @@ -672,6 +752,13 @@ impl SyncManager { SyncMessage::AddPeer(peer_id, info) => { self.add_peer(peer_id, info); } + SyncMessage::AddPeersForceRangeSync { + peers, + head_root, + head_slot, + } => { + self.add_peers_force_range_sync(&peers, head_root, head_slot); + } SyncMessage::RpcBlock { request_id, peer_id, @@ -1013,10 +1100,9 @@ impl SyncManager { seen_timestamp: Duration, ) { match request_id { - SyncRequestId::DataColumnsByRoot(req_id, requester) => { + SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response( req_id, - requester, peer_id, match data_column { Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), @@ -1058,7 +1144,6 @@ impl SyncManager { fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, - requester: DataColumnsByRootRequester, peer_id: PeerId, data_column: RpcEvent>>, ) { @@ -1066,7 +1151,7 @@ impl SyncManager { self.network 
.on_data_columns_by_root_response(req_id, peer_id, data_column) { - match requester { + match req_id.requester { DataColumnsByRootRequester::Sampling(id) => { if let Some((requester, result)) = self.sampling @@ -1103,22 +1188,14 @@ impl SyncManager { } fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { - // TODO(das): How is a consumer of sampling results? - // - Fork-choice for trailing DA - // - Single lookups to complete import requirements - // - Range sync to complete import requirements? Can sampling for syncing lag behind and - // accumulate in fork-choice? - match requester { SamplingRequester::ImportedBlock(block_root) => { debug!(self.log, "Sampling result"; "block_root" => %block_root, "result" => ?result); - // TODO(das): Consider moving SamplingResult to the beacon_chain crate and import - // here. No need to add too much enum variants, just whatever the beacon_chain or - // fork-choice needs to make a decision. Currently the fork-choice only needs to - // be notified of successful samplings, i.e. sampling failures don't trigger pruning match result { Ok(_) => { + // Notify the fork-choice of a successful sampling result to mark the block + // branch as safe. 
if let Err(e) = self .network .beacon_processor() diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 6669add4453..0f5fd6fb9f1 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -6,9 +6,11 @@ mod block_lookups; mod block_sidecar_coupling; pub mod manager; mod network_context; +mod peer_sampling; mod peer_sync_info; mod range_sync; -mod sampling; +#[cfg(test)] +mod tests; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 5b7003e5e85..c4d987e8582 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -2,11 +2,11 @@ //! channel and stores a global RPC ID to perform requests. use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; -use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; +use super::SyncMessage; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::NetworkMessage; @@ -17,17 +17,23 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; -use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, + OldBlocksByRangeRequestV1, 
OldBlocksByRangeRequestV2, +}; +use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; use lighthouse_network::service::api_types::{ AppRequestId, CustodyId, CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; -use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; +use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; use rand::seq::SliceRandom; use rand::thread_rng; -use requests::ActiveDataColumnsByRootRequest; pub use requests::LookupVerifyError; +use requests::{ + ActiveRequests, BlobsByRootRequestItems, BlocksByRootRequestItems, + DataColumnsByRootRequestItems, +}; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; use std::collections::HashMap; @@ -176,18 +182,17 @@ pub struct SyncNetworkContext { request_id: Id, /// A mapping of active BlocksByRoot requests, including both current slot and parent lookups. - blocks_by_root_requests: FnvHashMap, - + blocks_by_root_requests: + ActiveRequests>, /// A mapping of active BlobsByRoot requests, including both current slot and parent lookups. 
- blobs_by_root_requests: FnvHashMap>, + blobs_by_root_requests: ActiveRequests>, + /// A mapping of active DataColumnsByRoot requests + data_columns_by_root_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, - /// A mapping of active DataColumnsByRoot requests - data_columns_by_root_requests: - FnvHashMap>, - /// BlocksByRange requests paired with BlobsByRange range_block_components_requests: FnvHashMap)>, @@ -235,9 +240,9 @@ impl SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, - blocks_by_root_requests: <_>::default(), - blobs_by_root_requests: <_>::default(), - data_columns_by_root_requests: <_>::default(), + blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), + blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), + data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), custody_by_root_requests: <_>::default(), range_block_components_requests: FnvHashMap::default(), network_beacon_processor, @@ -246,6 +251,11 @@ impl SyncNetworkContext { } } + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { + self.network_beacon_processor + .send_sync_message(sync_message); + } + /// Returns the ids of all the requests made to the given peer_id. 
pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { let failed_range_ids = @@ -261,34 +271,19 @@ impl SyncNetworkContext { let failed_block_ids = self .blocks_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlock { id: *id }) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleBlock { id: *id }); let failed_blob_ids = self .blobs_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlob { id: *id }) - } else { - None - } - }); - let failed_data_column_by_root_ids = - self.data_columns_by_root_requests - .iter() - .filter_map(|(req_id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::DataColumnsByRoot(*req_id, request.requester)) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleBlob { id: *id }); + let failed_data_column_by_root_ids = self + .data_columns_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); failed_range_ids .chain(failed_block_ids) @@ -336,7 +331,7 @@ impl SyncNetworkContext { "head_slot" => %status_message.head_slot, ); - let request = Request::Status(status_message.clone()); + let request = RequestType::Status(status_message.clone()); let request_id = AppRequestId::Router; let _ = self.send_network_msg(NetworkMessage::SendRequest { peer_id, @@ -365,10 +360,26 @@ impl SyncNetworkContext { "epoch" => epoch, "peer" => %peer_id, ); + let rpc_request = match request { + BlocksByRangeRequest::V1(ref req) => { + RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: req.start_slot, + count: req.count, + step: 1, + })) + } + BlocksByRangeRequest::V2(ref req) => { + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + 
start_slot: req.start_slot, + count: req.count, + step: 1, + })) + } + }; self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRange(request.clone()), + request: rpc_request, request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -387,7 +398,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlobsByRange(BlobsByRangeRequest { + request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: *request.start_slot(), count: *request.count(), }), @@ -399,13 +410,13 @@ impl SyncNetworkContext { false }; - let (expects_custody_columns, num_of_custody_column_req) = + let (expects_columns, num_of_column_req) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let custody_indexes = self.network_globals().custody_columns.clone(); + let column_indexes = self.network_globals().sampling_columns.clone(); let mut num_of_custody_column_req = 0; for (peer_id, columns_by_range_request) in - self.make_columns_by_range_requests(request, &custody_indexes)? + self.make_columns_by_range_requests(request, &column_indexes)? 
{ requested_peers.push(peer_id); @@ -421,7 +432,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::DataColumnsByRange(columns_by_range_request), + request: RequestType::DataColumnsByRange(columns_by_range_request), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -429,15 +440,15 @@ impl SyncNetworkContext { num_of_custody_column_req += 1; } - (Some(custody_indexes), Some(num_of_custody_column_req)) + (Some(column_indexes), Some(num_of_custody_column_req)) } else { (None, None) }; let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_custody_columns, - num_of_custody_column_req, + expects_columns, + num_of_column_req, requested_peers, ); self.range_block_components_requests @@ -585,13 +596,19 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRoot(request.into_request(&self.chain.spec)), + request: RequestType::BlocksByRoot(request.into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blocks_by_root_requests - .insert(id, ActiveBlocksByRootRequest::new(request, peer_id)); + self.blocks_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests as returned for blocks_by_root. We always request a single + // block and the peer must have it. + true, + BlocksByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -607,49 +624,12 @@ impl SyncNetworkContext { lookup_id: SingleLookupId, peer_id: PeerId, block_root: Hash256, - downloaded_block: Option>>, + expected_blobs: usize, ) -> Result { - let Some(block) = downloaded_block.or_else(|| { - // If the block is already being processed or fully validated, retrieve how many blobs - // it expects. Consider any stage of the block. 
If the block root has been validated, we - // can assert that this is the correct value of `blob_kzg_commitments_count`. - match self.chain.get_block_process_status(&block_root) { - BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), - } - }) else { - // Wait to download the block before downloading blobs. Then we can be sure that the - // block has data, so there's no need to do "blind" requests for all possible blobs and - // latter handle the case where if the peer sent no blobs, penalize. - // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. - // - if `num_expected_blobs` returns Some = block is processed. - // - // Lookup sync event safety: Reaching this code means that a block is not in any pre-import - // cache nor in the request state of this lookup. Therefore, the block must either: (1) not - // be downloaded yet or (2) the block is already imported into the fork-choice. - // In case (1) the lookup must either successfully download the block or get dropped. - // In case (2) the block will be downloaded, processed, reach `DuplicateFullyImported` - // and get dropped as completed. 
- return Ok(LookupRequestResult::Pending("waiting for block download")); - }; - let expected_blobs = block.num_expected_blobs(); - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - - // Check if we are in deneb, before peerdas and inside da window - if !self.chain.should_fetch_blobs(block_epoch) { - return Ok(LookupRequestResult::NoRequestNeeded("blobs not required")); - } - - // No data required for this block - if expected_blobs == 0 { - return Ok(LookupRequestResult::NoRequestNeeded("no data")); - } - let imported_blob_indexes = self .chain .data_availability_checker - .imported_blob_indexes(&block_root) + .cached_blob_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let indices = (0..expected_blobs as u64) @@ -683,13 +663,20 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request: RequestType::BlobsByRoot(request.clone().into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blobs_by_root_requests - .insert(id, ActiveBlobsByRootRequest::new(request, peer_id)); + self.blobs_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests are returned for blobs_by_root. We only issue requests for + // blocks after we know the block has data, and only request peers after they claim to + // have imported the block+blobs. 
+ true, + BlobsByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -700,8 +687,12 @@ impl SyncNetworkContext { requester: DataColumnsByRootRequester, peer_id: PeerId, request: DataColumnsByRootSingleBlockRequest, + expect_max_responses: bool, ) -> Result, &'static str> { - let req_id = DataColumnsByRootRequestId(self.next_id()); + let req_id = DataColumnsByRootRequestId { + id: self.next_id(), + requester, + }; debug!( self.log, "Sending DataColumnsByRoot Request"; @@ -715,13 +706,15 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id, requester)), + request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id)), })?; self.data_columns_by_root_requests.insert( req_id, - ActiveDataColumnsByRootRequest::new(request, peer_id, requester), + peer_id, + expect_max_responses, + DataColumnsByRootRequestItems::new(request), ); Ok(LookupRequestResult::RequestSent(req_id)) @@ -735,45 +728,17 @@ impl SyncNetworkContext { &mut self, lookup_id: SingleLookupId, block_root: Hash256, - downloaded_block: Option>>, ) -> Result { - let Some(block) = - downloaded_block.or_else(|| match self.chain.get_block_process_status(&block_root) { - BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), - }) - else { - // Wait to download the block before downloading columns. Then we can be sure that the - // block has data, so there's no need to do "blind" requests for all possible columns and - // latter handle the case where if the peer sent no columns, penalize. - // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. 
- // - if `num_expected_blobs` returns Some = block is processed. - return Ok(LookupRequestResult::Pending("waiting for block download")); - }; - let expected_blobs = block.num_expected_blobs(); - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - - // Check if we are into peerdas and inside da window - if !self.chain.should_fetch_custody_columns(block_epoch) { - return Ok(LookupRequestResult::NoRequestNeeded("columns not required")); - } - - // No data required for this block - if expected_blobs == 0 { - return Ok(LookupRequestResult::NoRequestNeeded("no data")); - } - let custody_indexes_imported = self .chain .data_availability_checker - .imported_custody_column_indexes(&block_root) + .cached_data_column_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let custody_indexes_to_fetch = self .network_globals() - .custody_columns + .sampling_columns .clone() .into_iter() .filter(|index| !custody_indexes_imported.contains(index)) @@ -804,7 +769,6 @@ impl SyncNetworkContext { self.log.clone(), ); - // TODO(das): start request // Note that you can only send, but not handle a response here match request.continue_requests(self) { Ok(_) => { @@ -814,7 +778,6 @@ impl SyncNetworkContext { self.custody_by_root_requests.insert(requester, request); Ok(LookupRequestResult::RequestSent(req_id)) } - // TODO(das): handle this error properly Err(e) => Err(RpcRequestSendError::CustodyRequestError(e)), } } @@ -955,142 +918,74 @@ impl SyncNetworkContext { // Request handlers - pub fn on_single_block_response( + pub(crate) fn on_single_block_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>> { - let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blocks_by_root"]); - return None; - }; - - let resp = match 
rpc_event { - RpcEvent::Response(block, seen_timestamp) => { - match request.get_mut().add_response(block) { - Ok(block) => Ok((block, seen_timestamp)), - Err(e) => { - // The request must be dropped after receiving an error. - request.remove(); - Err(e.into()) - } + let response = self.blocks_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then(|(mut blocks, seen_timestamp)| { + // Enforce that exactly one chunk = one block is returned. ReqResp behavior limits the + // response count to at most 1. + match blocks.pop() { + Some(block) => Ok((block, seen_timestamp)), + // Should never happen, `blocks_by_root_requests` enforces that we receive at least + // 1 chunk. + None => Err(LookupVerifyError::NotEnoughResponsesReturned { actual: 0 }.into()), } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - Err(e) => Err(e.into()), - }, - RpcEvent::RPCError(e) => { - request.remove(); - Err(e.into()) - } - }; - - if let Err(RpcResponseError::VerifyError(e)) = &resp { + }) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } - Some(resp) + response } - pub fn on_single_blob_response( + pub(crate) fn on_single_blob_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>> { - let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blobs_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(blob, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(blob) { - Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, seen_timestamp)) - .map_err(|e| (e.into(), request.resolve())), - Ok(None) => return None, - Err(e) => Err((e.into(), 
request.resolve())), - } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - }, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; - - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more blobs than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(e) = &e { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + let response = self.blobs_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then( + |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + }, + ) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + response } #[allow(clippy::type_complexity)] - pub fn on_data_columns_by_root_response( + pub(crate) fn on_data_columns_by_root_response( &mut self, id: DataColumnsByRootRequestId, - _peer_id: PeerId, + peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>>> { - let Entry::Occupied(mut request) = self.data_columns_by_root_requests.entry(id) else { - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(data_column, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(data_column) { - Ok(Some(data_columns)) => Ok((data_columns, seen_timestamp)), - Ok(None) => return None, - Err(e) => 
Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - }, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; + let resp = self + .data_columns_by_root_requests + .on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more columns than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(_e) = &e { - // TODO(das): this is a bug, we should not penalise peer in this case. - // confirm this can be removed. - // self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + fn report_rpc_response_errors( + &mut self, + resp: Option>, + peer_id: PeerId, + ) -> Option> { + if let Some(Err(RpcResponseError::VerifyError(e))) = &resp { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + resp } /// Insert a downloaded column into an active custody request. Then make progress on the diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 6736bfb82f0..e4bce3dafcd 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -283,6 +283,10 @@ impl ActiveCustodyRequest { block_root: self.block_root, indices: indices.clone(), }, + // true = enforce max_requests are returned data_columns_by_root. 
We only issue requests + // for blocks after we know the block has data, and only request peers after they claim to + // have imported the block+columns and claim to be custodians + true, ) .map_err(Error::SendFailed)?; diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 0c2f59d143f..b9214bafcd7 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,23 +1,187 @@ +use std::{collections::hash_map::Entry, hash::Hash}; + +use beacon_chain::validator_monitor::timestamp_now; +use fnv::FnvHashMap; +use lighthouse_network::PeerId; use strum::IntoStaticStr; use types::Hash256; -pub use blobs_by_root::{ActiveBlobsByRootRequest, BlobsByRootSingleBlockRequest}; -pub use blocks_by_root::{ActiveBlocksByRootRequest, BlocksByRootSingleRequest}; +pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; +pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest}; pub use data_columns_by_root::{ - ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, + DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +use crate::metrics; + +use super::{RpcEvent, RpcResponseResult}; + mod blobs_by_root; mod blocks_by_root; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { - NoResponseReturned, - NotEnoughResponsesReturned { expected: usize, actual: usize }, + NotEnoughResponsesReturned { actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, } + +/// Collection of active requests of a single ReqResp method, i.e. 
`blocks_by_root` +pub struct ActiveRequests { + requests: FnvHashMap>, + name: &'static str, +} + +/// Stateful container for a single active ReqResp request +struct ActiveRequest { + state: State, + peer_id: PeerId, + // Error if the request terminates before receiving max expected responses + expect_max_responses: bool, +} + +enum State { + Active(T), + CompletedEarly, + Errored, +} + +impl ActiveRequests { + pub fn new(name: &'static str) -> Self { + Self { + requests: <_>::default(), + name, + } + } + + pub fn insert(&mut self, id: K, peer_id: PeerId, expect_max_responses: bool, items: T) { + self.requests.insert( + id, + ActiveRequest { + state: State::Active(items), + peer_id, + expect_max_responses, + }, + ); + } + + /// Handle an `RpcEvent` for a specific request index by `id`. + /// + /// Lighthouse ReqResp protocol API promises to send 0 or more `RpcEvent::Response` chunks, + /// and EITHER a single `RpcEvent::RPCError` or RpcEvent::StreamTermination. + /// + /// Downstream code expects to receive a single `Result` value per request ID. However, + /// `add_item` may convert ReqResp success chunks into errors. This function handles the + /// multiple errors / stream termination internally ensuring that a single `Some` is + /// returned. + pub fn on_response( + &mut self, + id: K, + rpc_event: RpcEvent, + ) -> Option>> { + let Entry::Occupied(mut entry) = self.requests.entry(id) else { + metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &[self.name]); + return None; + }; + + match rpc_event { + // Handler of a success ReqResp chunk. Adds the item to the request accumulator. + // `ActiveRequestItems` validates the item before appending to its internal state. 
+ RpcEvent::Response(item, seen_timestamp) => { + let request = &mut entry.get_mut(); + match &mut request.state { + State::Active(items) => { + match items.add(item) { + // Received all items we are expecting for, return early, but keep the request + // struct to handle the stream termination gracefully. + Ok(true) => { + let items = items.consume(); + request.state = State::CompletedEarly; + Some(Ok((items, seen_timestamp))) + } + // Received item, but we are still expecting more + Ok(false) => None, + // Received an invalid item + Err(e) => { + request.state = State::Errored; + Some(Err(e.into())) + } + } + } + // Should never happen, ReqResp network behaviour enforces a max count of chunks + // When `max_remaining_chunks <= 1` a the inbound stream in terminated in + // `rpc/handler.rs`. Handling this case adds complexity for no gain. Even if an + // attacker could abuse this, there's no gain in sending garbage chunks that + // will be ignored anyway. + State::CompletedEarly => None, + // Ignore items after errors. We may want to penalize repeated invalid chunks + // for the same response. But that's an optimization to ban peers sending + // invalid data faster that we choose to not adopt for now. 
+ State::Errored => None, + } + } + RpcEvent::StreamTermination => { + // After stream termination we must forget about this request, there will be no more + // messages coming from the network + let request = entry.remove(); + match request.state { + // Received a stream termination in a valid sequence, consume items + State::Active(mut items) => { + if request.expect_max_responses { + Some(Err(LookupVerifyError::NotEnoughResponsesReturned { + actual: items.consume().len(), + } + .into())) + } else { + Some(Ok((items.consume(), timestamp_now()))) + } + } + // Items already returned, ignore stream termination + State::CompletedEarly => None, + // Returned an error earlier, ignore stream termination + State::Errored => None, + } + } + RpcEvent::RPCError(e) => { + // After an Error event from the network we must forget about this request as this + // may be the last message for this request. + match entry.remove().state { + // Received error while request is still active, propagate error. + State::Active(_) => Some(Err(e.into())), + // Received error after completing the request, ignore the error. This is okay + // because the network has already registered a downscore event if necessary for + // this message. + State::CompletedEarly => None, + // Received a network error after a validity error. Okay to ignore, see above + State::Errored => None, + } + } + } + } + + pub fn active_requests_of_peer(&self, peer_id: &PeerId) -> Vec<&K> { + self.requests + .iter() + .filter(|(_, request)| &request.peer_id == peer_id) + .map(|(id, _)| id) + .collect() + } + + pub fn len(&self) -> usize { + self.requests.len() + } +} + +pub trait ActiveRequestItems { + type Item; + + /// Add a new item into the accumulator. Returns true if all expected items have been received. + fn add(&mut self, item: Self::Item) -> Result; + + /// Return all accumulated items consuming them. 
+ fn consume(&mut self) -> Vec; +} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index cb2b1a42ec4..fefb27a5efc 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,8 +1,8 @@ -use lighthouse_network::{rpc::methods::BlobsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct BlobsByRootSingleBlockRequest { @@ -25,34 +25,27 @@ impl BlobsByRootSingleBlockRequest { } } -pub struct ActiveBlobsByRootRequest { +pub struct BlobsByRootRequestItems { request: BlobsByRootSingleBlockRequest, - blobs: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { +impl BlobsByRootRequestItems { + pub fn new(request: BlobsByRootSingleBlockRequest) -> Self { Self { request, - blobs: vec![], - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlobsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. 
/// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - blob: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, blob: Self::Item) -> Result { let block_root = blob.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -63,34 +56,16 @@ impl ActiveBlobsByRootRequest { if !self.request.indices.contains(&blob.index) { return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } - if self.blobs.iter().any(|b| b.index == blob.index) { + if self.items.iter().any(|b| b.index == blob.index) { return Err(LookupVerifyError::DuplicateData); } - self.blobs.push(blob); - if self.blobs.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.blobs))) - } else { - Ok(None) - } - } + self.items.push(blob); - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.blobs.len(), - }) - } + Ok(self.items.len() >= self.request.indices.len()) } - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs index a15d4e39353..f3cdcbe714f 100644 --- a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -1,9 +1,9 @@ use beacon_chain::get_block_root; -use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; +use lighthouse_network::rpc::BlocksByRootRequest; use std::sync::Arc; use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Copy, Clone)] pub struct BlocksByRootSingleRequest(pub Hash256); @@ -14,47 +14,38 @@ impl BlocksByRootSingleRequest { } } -pub struct ActiveBlocksByRootRequest { +pub struct BlocksByRootRequestItems { request: BlocksByRootSingleRequest, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { +impl BlocksByRootRequestItems { + pub fn new(request: BlocksByRootSingleRequest) -> Self { Self { request, - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlocksByRootRequestItems { + type Item = Arc>; /// Append a response to the single chunk request. If the chunk is valid, the request is /// resolved immediately. 
/// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - block: Arc>, - ) -> Result>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, block: Self::Item) -> Result { let block_root = get_block_root(&block); if self.request.0 != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); } - // Valid data, blocks by root expects a single response - self.resolved = true; - Ok(block) + self.items.push(block); + // Always returns true, blocks by root expects a single response + Ok(true) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NoResponseReturned) - } + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index a42ae7ca41f..1b8d46ff072 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -1,9 +1,8 @@ -use lighthouse_network::service::api_types::DataColumnsByRootRequester; -use lighthouse_network::{rpc::methods::DataColumnsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::DataColumnsByRootRequest; use std::sync::Arc; use types::{ChainSpec, DataColumnIdentifier, DataColumnSidecar, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct DataColumnsByRootSingleBlockRequest { @@ -26,40 +25,27 @@ impl DataColumnsByRootSingleBlockRequest { } } -pub struct ActiveDataColumnsByRootRequest { +pub struct DataColumnsByRootRequestItems { request: DataColumnsByRootSingleBlockRequest, items: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, - 
pub(crate) requester: DataColumnsByRootRequester, } -impl ActiveDataColumnsByRootRequest { - pub fn new( - request: DataColumnsByRootSingleBlockRequest, - peer_id: PeerId, - requester: DataColumnsByRootRequester, - ) -> Self { +impl DataColumnsByRootRequestItems { + pub fn new(request: DataColumnsByRootSingleBlockRequest) -> Self { Self { request, items: vec![], - resolved: false, - peer_id, - requester, } } +} + +impl ActiveRequestItems for DataColumnsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - data_column: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, data_column: Self::Item) -> Result { let block_root = data_column.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -75,29 +61,11 @@ impl ActiveDataColumnsByRootRequest { } self.items.push(data_column); - if self.items.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.items))) - } else { - Ok(None) - } - } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.items.len(), - }) - } + Ok(self.items.len() >= self.request.indices.len()) } - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs similarity index 83% rename from beacon_node/network/src/sync/sampling.rs rename to beacon_node/network/src/sync/peer_sampling.rs index 524fe86bee9..289ed73cdd2 100644 --- a/beacon_node/network/src/sync/sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -1,4 +1,6 @@ use self::request::ActiveColumnSampleRequest; +#[cfg(test)] +pub(crate) use self::request::Status; use super::network_context::{ DataColumnsByRootSingleBlockRequest, RpcResponseError, SyncNetworkContext, }; @@ -22,7 +24,6 @@ pub type SamplingResult = Result<(), SamplingError>; type DataColumnSidecarList = Vec>>; pub struct Sampling { - // TODO(das): stalled sampling request are never cleaned up requests: HashMap>, sampling_config: SamplingConfig, log: slog::Logger, @@ -42,6 +43,18 @@ impl Sampling { self.requests.values().map(|r| r.block_root).collect() } + #[cfg(test)] + pub fn get_request_status( + &self, + block_root: Hash256, + index: &ColumnIndex, + ) -> Option { + let requester = SamplingRequester::ImportedBlock(block_root); + self.requests + .get(&requester) + .and_then(|req| req.get_request_status(index)) + } + /// Create a new sampling request for a known block /// /// ### Returns @@ -74,7 +87,11 @@ impl Sampling { } }; - debug!(self.log, "Created new sample request"; "id" => ?id); + debug!(self.log, + "Created new sample request"; + "id" => ?id, + "column_selection" => ?request.column_selection() + ); // TOOD(das): If a node has very little peers, continue_sampling() will attempt to find enough // to sample here, immediately failing the sampling request. 
There should be some grace @@ -220,6 +237,20 @@ impl ActiveSamplingRequest { } } + #[cfg(test)] + pub fn get_request_status(&self, index: &ColumnIndex) -> Option { + self.column_requests.get(index).map(|req| req.status()) + } + + /// Return the current ordered list of columns that this requests has to sample to succeed + pub(crate) fn column_selection(&self) -> Vec { + self.column_shuffle + .iter() + .take(REQUIRED_SUCCESSES[0]) + .copied() + .collect() + } + /// Insert a downloaded column into an active sampling request. Then make progress on the /// entire request. /// @@ -244,22 +275,35 @@ impl ActiveSamplingRequest { .column_indexes_by_sampling_request .get(&sampling_request_id) else { - error!(self.log, "Column indexes for the sampling request ID not found"; "sampling_request_id" => ?sampling_request_id); + error!(self.log, + "Column indexes for the sampling request ID not found"; + "sampling_request_id" => ?sampling_request_id + ); return Ok(None); }; match resp { Ok((mut resp_data_columns, seen_timestamp)) => { - debug!(self.log, "Sample download success"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "count" => resp_data_columns.len()); + let resp_column_indexes = resp_data_columns + .iter() + .map(|r| r.index) + .collect::>(); + debug!(self.log, + "Sample download success"; + "block_root" => %self.block_root, + "column_indexes" => ?resp_column_indexes, + "count" => resp_data_columns.len() + ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); // Filter the data received in the response using the requested column indexes. 
let mut data_columns = vec![]; for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - self.log, - "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + warn!(self.log, + "Active column sample request not found"; + "block_root" => %self.block_root, + "column_index" => column_index ); continue; }; @@ -268,9 +312,13 @@ impl ActiveSamplingRequest { .iter() .position(|data| &data.index == column_index) else { - // Peer does not have the requested data. - // TODO(das) what to do? - debug!(self.log, "Sampling peer claims to not have the data"; "block_root" => %self.block_root, "column_index" => column_index); + // Peer does not have the requested data, mark peer as "dont have" and try + // again with a different peer. + debug!(self.log, + "Sampling peer claims to not have the data"; + "block_root" => %self.block_root, + "column_index" => column_index + ); request.on_sampling_error()?; continue; }; @@ -283,15 +331,16 @@ impl ActiveSamplingRequest { .iter() .map(|d| d.index) .collect::>(); - debug!( - self.log, - "Received data that was not requested"; "block_root" => %self.block_root, "column_indexes" => ?resp_column_indexes + debug!(self.log, + "Received data that was not requested"; + "block_root" => %self.block_root, + "column_indexes" => ?resp_column_indexes ); } // Handle the downloaded data columns. 
if data_columns.is_empty() { - debug!(self.log,"Received empty response"; "block_root" => %self.block_root); + debug!(self.log, "Received empty response"; "block_root" => %self.block_root); self.column_indexes_by_sampling_request .remove(&sampling_request_id); } else { @@ -302,10 +351,18 @@ impl ActiveSamplingRequest { // Peer has data column, send to verify let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { // If processor is not available, error the entire sampling - debug!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => "beacon processor unavailable"); + debug!(self.log, + "Dropping sampling"; + "block" => %self.block_root, + "reason" => "beacon processor unavailable" + ); return Err(SamplingError::ProcessorUnavailable); }; - debug!(self.log, "Sending data_column for verification"; "block" => ?self.block_root, "column_indexes" => ?column_indexes); + debug!(self.log, + "Sending data_column for verification"; + "block" => ?self.block_root, + "column_indexes" => ?column_indexes + ); if let Err(e) = beacon_processor.send_rpc_validate_data_columns( self.block_root, data_columns, @@ -315,23 +372,34 @@ impl ActiveSamplingRequest { sampling_request_id, }, ) { - // TODO(das): Beacon processor is overloaded, what should we do? - error!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => e.to_string()); + // Beacon processor is overloaded, drop sampling attempt. Failing to sample + // is not a permanent state so we should recover once the node has capacity + // and receives a descendant block. 
+ error!(self.log, + "Dropping sampling"; + "block" => %self.block_root, + "reason" => e.to_string() + ); return Err(SamplingError::SendFailed("beacon processor send failure")); } } } Err(err) => { - debug!(self.log, "Sample download error"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "error" => ?err); + debug!(self.log, "Sample download error"; + "block_root" => %self.block_root, + "column_indexes" => ?column_indexes, + "error" => ?err + ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]); - // Error downloading, maybe penalize peer and retry again. - // TODO(das) with different peer or different peer? + // Error downloading, malicious network errors are already penalized before + // reaching this function. Mark the peer as failed and try again with another. for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - self.log, - "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + warn!(self.log, + "Active column sample request not found"; + "block_root" => %self.block_root, + "column_index" => column_index ); continue; }; @@ -386,7 +454,7 @@ impl ActiveSamplingRequest { debug!(self.log, "Sample verification failure"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "reason" => ?err); metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::FAILURE]); - // TODO(das): Peer sent invalid data, penalize and try again from different peer + // Peer sent invalid data, penalize and try again from different peer // TODO(das): Count individual failures for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { @@ -477,6 +545,10 @@ impl ActiveSamplingRequest { block_root: self.block_root, indices: column_indexes.clone(), }, + // false = We issue request to custodians who may or may not have received the + // samples yet. 
We don't any signal (like an attestation or status messages that the + // custodian has received data). + false, ) .map_err(SamplingError::SendFailed)?; self.column_indexes_by_sampling_request @@ -522,8 +594,9 @@ mod request { peers_dont_have: HashSet, } + // Exposed only for testing assertions in lookup tests #[derive(Debug, Clone)] - enum Status { + pub(crate) enum Status { NoPeers, NotStarted, Sampling(PeerId), @@ -567,6 +640,11 @@ mod request { } } + #[cfg(test)] + pub(crate) fn status(&self) -> Status { + self.status.clone() + } + pub(crate) fn choose_peer( &mut self, cx: &SyncNetworkContext, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ed5946ada72..51d9d9da37f 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -8,9 +8,9 @@ use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, Ba use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; -use lighthouse_metrics::set_int_gauge; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; +use metrics::set_int_gauge; use rand::seq::SliceRandom; use rand::Rng; use slog::{crit, debug, o, warn}; @@ -444,9 +444,9 @@ impl SyncingChain { self.request_batches(network)?; } } - } else if !self.good_peers_on_custody_subnets(self.processing_target, network) { + } else if !self.good_peers_on_sampling_subnets(self.processing_target, network) { // This is to handle the case where no batch was sent for the current processing - // target when there is no custody peers available. This is a valid state and should not + // target when there is no sampling peers available. This is a valid state and should not // return an error. return Ok(KeepChain); } else { @@ -1075,10 +1075,10 @@ impl SyncingChain { // check if we have the batch for our optimistic start. If not, request it first. 
// We wait for this batch before requesting any other batches. if let Some(epoch) = self.optimistic_start { - if !self.good_peers_on_custody_subnets(epoch, network) { + if !self.good_peers_on_sampling_subnets(epoch, network) { debug!( self.log, - "Waiting for peers to be available on custody column subnets" + "Waiting for peers to be available on sampling column subnets" ); return Ok(KeepChain); } @@ -1107,14 +1107,18 @@ impl SyncingChain { Ok(KeepChain) } - /// Checks all custody column subnets for peers. Returns `true` if there is at least one peer in - /// every custody column subnet. - fn good_peers_on_custody_subnets(&self, epoch: Epoch, network: &SyncNetworkContext) -> bool { + /// Checks all sampling column subnets for peers. Returns `true` if there is at least one peer in + /// every sampling column subnet. + fn good_peers_on_sampling_subnets( + &self, + epoch: Epoch, + network: &SyncNetworkContext, + ) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { - // Require peers on all custody column subnets before sending batches + // Require peers on all sampling column subnets before sending batches let peers_on_all_custody_subnets = network .network_globals() - .custody_subnets + .sampling_subnets .iter() .all(|subnet_id| { let peer_count = network @@ -1167,11 +1171,11 @@ impl SyncingChain { return None; } - // don't send batch requests until we have peers on custody subnets + // don't send batch requests until we have peers on sampling subnets // TODO(das): this is a workaround to avoid sending out excessive block requests because // block and data column requests are currently coupled. This can be removed once we find a // way to decouple the requests and do retries individually, see issue #6258. 
- if !self.good_peers_on_custody_subnets(self.to_be_downloaded, network) { + if !self.good_peers_on_sampling_subnets(self.to_be_downloaded, network) { debug!( self.log, "Waiting for peers to be available on custody column subnets" diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index f28b57eb187..0ef99838dee 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -386,6 +386,7 @@ where #[cfg(test)] mod tests { use crate::network_beacon_processor::NetworkBeaconProcessor; + use crate::sync::SyncMessage; use crate::NetworkMessage; use super::*; @@ -537,21 +538,20 @@ mod tests { } else { panic!("Should have sent a batch request to the peer") }; - let blob_req_id = match fork_name { - ForkName::Deneb | ForkName::Electra => { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } + let blob_req_id = if fork_name.deneb_enabled() { + if let Ok(NetworkMessage::SendRequest { + peer_id, + request: _, + request_id, + }) = self.network_rx.try_recv() + { + assert_eq!(&peer_id, expected_peer); + Some(request_id) + } else { + panic!("Should have sent a batch request to the peer") } - _ => None, + } else { + None }; (block_req_id, blob_req_id) } @@ -691,6 +691,7 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), @@ -701,6 +702,7 @@ mod tests { let (network_beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals.clone(), + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), 
log.clone(), diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/tests/lookups.rs similarity index 91% rename from beacon_node/network/src/sync/block_lookups/tests.rs rename to beacon_node/network/src/sync/tests/lookups.rs index c0a766137bf..9f2c9ef66f0 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,93 +1,50 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::sampling::SamplingConfig; -use crate::sync::{SamplingId, SyncMessage}; +use crate::sync::block_lookups::{ + BlockLookupSummary, PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::{ + manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + peer_sampling::SamplingConfig, + SamplingId, SyncMessage, +}; use crate::NetworkMessage; use std::sync::Arc; +use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; -use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::block_verification_types::BlockImportData; -use beacon_chain::builder::Witness; -use beacon_chain::data_availability_checker::Availability; -use beacon_chain::eth1_chain::CachingEth1Backend; -use beacon_chain::test_utils::{ - build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, NumBlobs, -}; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ - AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, + blob_verification::GossipVerifiedBlob, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::Availability, + test_utils::{ + build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, + }, + 
validator_monitor::timestamp_now, + AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, + PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; -use lighthouse_network::service::api_types::{ - AppRequestId, DataColumnsByRootRequester, Id, SamplingRequester, SingleLookupReqId, - SyncRequestId, +use lighthouse_network::{ + rpc::{RPCError, RequestType, RpcErrorResponse}, + service::api_types::{ + AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingRequester, SingleLookupReqId, SyncRequestId, + }, + types::SyncState, + NetworkConfig, NetworkGlobals, PeerId, }; -use lighthouse_network::types::SyncState; -use lighthouse_network::{NetworkConfig, NetworkGlobals, Request}; use slog::info; -use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; -use store::MemoryStore; +use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; -use types::data_column_sidecar::ColumnIndex; -use types::test_utils::TestRandom; use types::{ - test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, + Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; -use types::{BeaconState, BeaconStateBase}; -use types::{DataColumnSidecar, Epoch}; - -type T = Witness, E, MemoryStore, MemoryStore>; - -/// This test utility enables integration testing of Lighthouse sync components. -/// -/// It covers the following: -/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. -/// 2. Making assertions on `WorkEvent`s received from sync -/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). 
-/// -/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form: -/// +-----------------+ -/// | BeaconProcessor | -/// +---------+-------+ -/// ^ | -/// | | -/// WorkEvent | | SyncMsg -/// | | (Result) -/// | v -/// +--------+ +-----+-----------+ +----------------+ -/// | Router +----------->| SyncManager +------------>| NetworkService | -/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ -/// (RPC resp) | - RangeSync | (RPC req) -/// +-----------------+ -/// | - BackFillSync | -/// +-----------------+ -/// | - BlockLookups | -/// +-----------------+ -struct TestRig { - /// Receiver for `BeaconProcessor` events (e.g. block processing results). - beacon_processor_rx: mpsc::Receiver>, - beacon_processor_rx_queue: Vec>, - /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) - network_rx: mpsc::UnboundedReceiver>, - /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) - network_rx_queue: Vec>, - /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. - sync_manager: SyncManager, - /// To manipulate sync state and peer connection status - network_globals: Arc>, - /// Beacon chain harness - harness: BeaconChainHarness>, - /// `rng` for generating test blocks and blobs. 
- rng: XorShiftRng, - fork_name: ForkName, - log: Logger, -} const D: Duration = Duration::new(0, 0); const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; @@ -102,8 +59,14 @@ struct TestRigConfig { impl TestRig { fn test_setup_with_config(config: Option) -> Self { - let enable_log = cfg!(feature = "test_logger"); - let log = build_log(slog::Level::Trace, enable_log); + let logger_type = if cfg!(feature = "test_logger") { + LoggerType::Test + } else if cfg!(feature = "ci_logger") { + LoggerType::CI + } else { + LoggerType::Null + }; + let log = build_log(slog::Level::Trace, logger_type); // Use `fork_from_env` logic to set correct fork epochs let mut spec = test_spec::(); @@ -130,6 +93,7 @@ impl TestRig { let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, sync_rx) = mpsc::unbounded_channel::>(); // TODO(das): make the generation of the ENR use the deterministic rng to have consistent // column assignments let network_config = Arc::new(NetworkConfig::default()); @@ -141,13 +105,12 @@ impl TestRig { )); let (beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals, + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), log.clone(), ); - let (_sync_send, sync_recv) = mpsc::unbounded_channel::>(); - let fork_name = chain.spec.fork_name_at_slot::(chain.slot().unwrap()); // All current tests expect synced and EL online state @@ -161,13 +124,15 @@ impl TestRig { beacon_processor_rx_queue: vec![], network_rx, network_rx_queue: vec![], + sync_rx, rng, network_globals: beacon_processor.network_globals.clone(), sync_manager: SyncManager::new( chain, network_tx, beacon_processor.into(), - sync_recv, + // Pass empty recv not tied to any tx + mpsc::unbounded_channel().1, SamplingConfig::Custom { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, @@ -230,6 +195,13 @@ impl TestRig { self.send_sync_message(SyncMessage::SampleBlock(block_root, block_slot)) } + 
/// Drain all sync messages in the sync_rx attached to the beacon processor + fn drain_sync_rx(&mut self) { + while let Ok(sync_message) = self.sync_rx.try_recv() { + self.send_sync_message(sync_message); + } + } + fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -286,6 +258,10 @@ impl TestRig { self.sync_manager.active_parent_lookups().len() } + fn active_range_sync_chain(&self) -> (RangeSyncType, Slot, Slot) { + self.sync_manager.get_range_sync_chains().unwrap().unwrap() + } + fn assert_single_lookups_count(&self, count: usize) { assert_eq!( self.active_single_lookups_count(), @@ -303,6 +279,13 @@ impl TestRig { ); } + fn expect_active_sampling(&mut self, block_root: &Hash256) { + assert!(self + .sync_manager + .active_sampling_requests() + .contains(block_root)); + } + fn expect_clean_finished_sampling(&mut self) { self.expect_empty_network(); self.expect_sampling_result_work(); @@ -618,7 +601,7 @@ impl TestRig { id, peer_id, RPCError::ErrorResponse( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "older than deneb".into(), ), ); @@ -715,10 +698,10 @@ impl TestRig { let first_dc = data_columns.first().unwrap(); let block_root = first_dc.block_root(); let sampling_request_id = match id.0 { - SyncRequestId::DataColumnsByRoot( - _, - _requester @ DataColumnsByRootRequester::Sampling(sampling_id), - ) => sampling_id.sampling_request_id, + SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Sampling(sampling_id), + .. 
+ }) => sampling_id.sampling_request_id, _ => unreachable!(), }; self.complete_data_columns_by_root_request(id, data_columns); @@ -743,14 +726,15 @@ impl TestRig { data_columns: Vec>>, missing_components: bool, ) { - let lookup_id = - if let SyncRequestId::DataColumnsByRoot(_, DataColumnsByRootRequester::Custody(id)) = - ids.first().unwrap().0 - { - id.requester.0.lookup_id - } else { - panic!("not a custody requester") - }; + let lookup_id = if let SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Custody(id), + .. + }) = ids.first().unwrap().0 + { + id.requester.0.lookup_id + } else { + panic!("not a custody requester") + }; let first_column = data_columns.first().cloned().unwrap(); @@ -894,7 +878,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlocksByRoot(request), + request: RequestType::BlocksByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, @@ -914,7 +898,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlobsByRoot(request), + request: RequestType::BlobsByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids @@ -939,7 +923,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlocksByRoot(request), + request: RequestType::BlocksByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, @@ -961,7 +945,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlobsByRoot(request), + request: RequestType::BlobsByRoot(request), 
request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids @@ -989,7 +973,7 @@ impl TestRig { .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::DataColumnsByRoot(request), + request: RequestType::DataColumnsByRoot(request), request_id: AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), } if request .data_column_ids @@ -1083,6 +1067,11 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected sampling result work: {e}")) } + fn expect_no_work_event(&mut self) { + self.drain_processor_rx(); + assert!(self.network_rx_queue.is_empty()); + } + fn expect_no_penalty_for(&mut self, peer_id: PeerId) { self.drain_network_rx(); let downscore_events = self @@ -1154,6 +1143,7 @@ impl TestRig { penalty_msg, expect_penalty_msg, "Unexpected penalty msg for {peer_id}" ); + self.log(&format!("Found expected penalty {penalty_msg}")); } pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { @@ -1283,6 +1273,46 @@ impl TestRig { imported: false, }); } + + fn assert_sampling_request_ongoing(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::Sampling { .. }) { + panic!("expected {block_root} {index} request to be on going: {status:?}"); + } + } + } + + fn assert_sampling_request_nopeers(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. 
}) { + panic!("expected {block_root} {index} request to be no peers: {status:?}"); + } + } + } + + fn log_sampling_requests(&self, block_root: Hash256, indices: &[ColumnIndex]) { + let statuses = indices + .iter() + .map(|index| { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + (index, status) + }) + .collect::>(); + self.log(&format!( + "Sampling request status for {block_root}: {statuses:?}" + )); + } } #[test] @@ -1341,7 +1371,7 @@ fn test_single_block_lookup_empty_response() { // The peer does not have the block. It should be penalized. r.single_lookup_block_response(id, peer_id, None); - r.expect_penalty(peer_id, "NoResponseReturned"); + r.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // it should be retried let id = r.expect_block_lookup_request(block_root); // Send the right block this time. @@ -1667,7 +1697,18 @@ fn test_parent_lookup_too_deep_grow_ancestor() { ) } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 1) + ) + ); + // Should not penalize peer, but network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(chain_hash); } @@ -1694,7 +1735,18 @@ fn test_parent_lookup_too_deep_grow_tip() { ); } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 2) + ) + ); + // Should not penalize peer, but network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(tip.canonical_root()); } @@ -2016,6 +2068,77 @@ fn 
sampling_avoid_retrying_same_peer() { r.expect_empty_network(); } +#[test] +fn sampling_batch_requests() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + r.assert_sampling_request_ongoing(block_root, &column_indexes); + + // Resolve the request. + r.complete_valid_sampling_column_requests( + vec![(sync_request_id, column_indexes.clone())], + data_columns, + ); + r.expect_clean_finished_sampling(); +} + +#[test] +fn sampling_batch_requests_not_enough_responses_returned() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + + // The request status should be set to Sampling. + r.assert_sampling_request_ongoing(block_root, &column_indexes); + + // Split the indexes to simulate the case where the supernode doesn't have the requested column. + let (column_indexes_supernode_does_not_have, column_indexes_to_complete) = + column_indexes.split_at(1); + + // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. 
+ let data_columns_to_complete = data_columns + .iter() + .filter(|d| column_indexes_to_complete.contains(&d.index)) + .cloned() + .collect::>(); + r.complete_data_columns_by_root_request( + (sync_request_id, column_indexes.clone()), + &data_columns_to_complete, + ); + + // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. + r.log_sampling_requests(block_root, &column_indexes); + r.assert_sampling_request_nopeers(block_root, column_indexes_supernode_does_not_have); + + // The sampling request stalls. + r.expect_empty_network(); + r.expect_no_work_event(); + r.expect_active_sampling(&block_root); +} + #[test] fn custody_lookup_happy_path() { let Some(mut r) = TestRig::test_setup_after_peerdas() else { @@ -2030,9 +2153,10 @@ fn custody_lookup_happy_path() { // Should not request blobs let id = r.expect_block_lookup_request(block.canonical_root()); r.complete_valid_block_request(id, block.into(), true); - let custody_column_count = spec.custody_requirement * spec.data_columns_per_subnet() as u64; + // for each slot we download `samples_per_slot` columns + let sample_column_count = spec.samples_per_slot * spec.data_columns_per_subnet() as u64; let custody_ids = - r.expect_only_data_columns_by_root_requests(block_root, custody_column_count as usize); + r.expect_only_data_columns_by_root_requests(block_root, sample_column_count as usize); r.complete_valid_custody_request(custody_ids, data_columns, false); r.expect_no_active_lookups(); } @@ -2552,11 +2676,6 @@ mod deneb_only { self.blobs.pop().expect("blobs"); self } - fn invalidate_blobs_too_many(mut self) -> Self { - let first_blob = self.blobs.first().expect("blob").clone(); - self.blobs.push(first_blob); - self - } fn expect_block_process(mut self) -> Self { self.rig.expect_block_process(ResponseType::Block); self @@ -2645,21 +2764,6 @@ mod deneb_only { .expect_no_block_request(); } - #[test] - fn 
single_block_response_then_too_many_blobs_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty("TooManyResponses") - // Network context returns "download success" because the request has enough blobs + it - // downscores the peer for returning too many. - .expect_no_block_request(); - } - // Test peer returning block that has unknown parent, and a new lookup is created #[test] fn parent_block_unknown_parent() { @@ -2700,7 +2804,7 @@ mod deneb_only { }; tester .empty_block_response() - .expect_penalty("NoResponseReturned") + .expect_penalty("NotEnoughResponsesReturned") .expect_block_request() .expect_no_blobs_request() .block_response_and_expect_blob_request() diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs new file mode 100644 index 00000000000..47666b413c5 --- /dev/null +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -0,0 +1,67 @@ +use crate::sync::manager::SyncManager; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use crate::NetworkMessage; +use beacon_chain::builder::Witness; +use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use beacon_processor::WorkEvent; +use lighthouse_network::NetworkGlobals; +use slog::Logger; +use slot_clock::ManualSlotClock; +use std::sync::Arc; +use store::MemoryStore; +use tokio::sync::mpsc; +use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; + +mod lookups; +mod range; + +type T = Witness, E, MemoryStore, MemoryStore>; + +/// This test utility enables integration testing of Lighthouse sync components. +/// +/// It covers the following: +/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. +/// 2. 
Making assertions on `WorkEvent`s received from sync +/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). +/// +/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form: +/// +-----------------+ +/// | BeaconProcessor | +/// +---------+-------+ +/// ^ | +/// | | +/// WorkEvent | | SyncMsg +/// | | (Result) +/// | v +/// +--------+ +-----+-----------+ +----------------+ +/// | Router +----------->| SyncManager +------------>| NetworkService | +/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ +/// (RPC resp) | - RangeSync | (RPC req) +/// +-----------------+ +/// | - BackFillSync | +/// +-----------------+ +/// | - BlockLookups | +/// +-----------------+ +struct TestRig { + /// Receiver for `BeaconProcessor` events (e.g. block processing results). + beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx_queue: Vec>, + /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) + network_rx: mpsc::UnboundedReceiver>, + /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) + network_rx_queue: Vec>, + /// Receiver for `SyncMessage` from the network + sync_rx: mpsc::UnboundedReceiver>, + /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. + sync_manager: SyncManager, + /// To manipulate sync state and peer connection status + network_globals: Arc>, + /// Beacon chain harness + harness: BeaconChainHarness>, + /// `rng` for generating test blocks and blobs. 
+ rng: XorShiftRng, + fork_name: ForkName, + log: Logger, +} diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/beacon_node/network/src/sync/tests/range.rs @@ -0,0 +1 @@ + diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cbf6284f2ae..5b48e3f0d88 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -7,7 +7,7 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } itertools = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } types = { workspace = true } state_processing = { workspace = true } @@ -25,4 +25,4 @@ tokio = { workspace = true } maplit = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs deleted file mode 100644 index f0dc6536a54..00000000000 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; - -/// Serialized `AttestationData` augmented with a domain to encode the fork info. -/// -/// [DEPRECATED] To be removed once all nodes have updated to schema v12. 
-#[derive( - PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, -)] -pub struct AttestationId { - v: Vec, -} diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index 07fd72f02c5..cbab97e7199 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -113,7 +113,7 @@ impl BlsToExecutionChanges { .validators() .get(validator_index as usize) .map_or(true, |validator| { - let prune = validator.has_eth1_withdrawal_credential(spec) + let prune = validator.has_execution_withdrawal_credential(spec) && head_block .message() .body() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e6a61edc098..3a002bf8703 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,4 @@ mod attestation; -mod attestation_id; mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; @@ -585,7 +584,7 @@ impl OperationPool { && state .get_validator(address_change.as_inner().message.validator_index as usize) .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) + !validator.has_execution_withdrawal_credential(spec) }) }, |address_change| address_change.as_inner().clone(), diff --git a/beacon_node/operation_pool/src/metrics.rs b/beacon_node/operation_pool/src/metrics.rs index e2a8b43ed17..14088688e5e 100644 --- a/beacon_node/operation_pool/src/metrics.rs +++ b/beacon_node/operation_pool/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static BUILD_REWARD_CACHE_TIME: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e9611fd1eb..34b03a09557 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -401,15 +401,6 @@ pub fn cli_app() -> Command 
{ .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("self-limiter") - .long("self-limiter") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-self-limiter") .long("disable-self-limiter") @@ -525,16 +516,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-spec-fork") - .long("http-spec-fork") - .requires("enable_http") - .value_name("FORK") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("http-enable-tls") .long("http-enable-tls") @@ -564,16 +545,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-allow-sync-stalled") - .long("http-allow-sync-stalled") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .requires("enable_http") - .help("This flag is deprecated and has no effect.") - .hide(true) - .display_order(0) - ) .arg( Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") @@ -659,7 +630,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - + .arg( + Arg::new("idontwant-message-size-threshold") + .long("idontwant-message-size-threshold") + .help("Specifies the minimum message size for which IDONTWANT messages are sent. \ + This an optimization strategy to not send IDONTWANT messages for smaller messages.") + .action(ArgAction::Set) + .hide(true) + .display_order(0) + ) /* * Monitoring metrics */ @@ -814,6 +793,7 @@ pub fn cli_app() -> Command { .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. 
Uses the same endpoint to populate the \ deposit cache.") + .required(true) .action(ArgAction::Set) .display_order(0) ) @@ -1283,14 +1263,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-lock-timeouts") - .long("disable-lock-timeouts") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-proposer-reorgs") .long("disable-proposer-reorgs") @@ -1503,14 +1475,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("always-prefer-builder-payload") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .long("always-prefer-builder-payload") - .help("This flag is deprecated and has no effect.") - .display_order(0) - ) .arg( Arg::new("invalid-gossip-verified-blocks-path") .action(ArgAction::Set) @@ -1522,14 +1486,6 @@ pub fn cli_app() -> Command { filling up their disks.") .display_order(0) ) - .arg( - Arg::new("progressive-balances") - .long("progressive-balances") - .value_name("MODE") - .help("Deprecated. 
This optimisation is now the default and cannot be disabled.") - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") @@ -1591,13 +1547,5 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-duplicate-warn-logs") - .long("disable-duplicate-warn-logs") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0eff8577c4a..ecadee5f476 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -152,14 +152,6 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.get_one::("http-spec-fork").is_some() { - warn!( - log, - "Ignoring --http-spec-fork"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("http-enable-tls") { client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args @@ -175,14 +167,6 @@ pub fn get_config( }); } - if cli_args.get_flag("http-allow-sync-stalled") { - warn!( - log, - "Ignoring --http-allow-sync-stalled"; - "info" => "this flag is deprecated and will be removed" - ); - } - client_config.http_api.sse_capacity_multiplier = parse_required(cli_args, "http-sse-capacity-multiplier")?; @@ -300,100 +284,94 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if let Some(endpoints) = cli_args.get_one::("execution-endpoint") { - let mut el_config = execution_layer::Config::default(); + // `--execution-endpoint` is required now. 
+ let endpoints: String = clap_utils::parse_required(cli_args, "execution-endpoint")?; + let mut el_config = execution_layer::Config::default(); - // Always follow the deposit contract when there is an execution endpoint. - // - // This is wasteful for non-staking nodes as they have no need to process deposit contract - // logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or - // `--staking` flags, however that poses a risk to stakers since they cannot produce blocks - // without "eth1". - // - // The waste for non-staking nodes is relatively small so we err on the side of safety for - // stakers. The merge is already complicated enough. - client_config.sync_eth1_chain = true; - - // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. - let execution_endpoint = - parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; - - // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via - // file_path or directly as string. - - let secret_file: PathBuf; - // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. - if let Some(secret_files) = cli_args.get_one::("execution-jwt") { - secret_file = - parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; - - // Check if the JWT secret key is passed directly via cli flag and persist it to the default - // file location. 
- } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") - { - use std::fs::File; - use std::io::Write; - secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); - let mut jwt_secret_key_file = File::create(secret_file.clone()) - .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; - jwt_secret_key_file - .write_all(jwt_secret_key.as_bytes()) - .map_err(|e| { - format!( - "Error occurred while writing to jwt_secret_key file: {:?}", - e - ) - })?; - } else { - return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); - } - - // Parse and set the payload builder, if any. - if let Some(endpoint) = cli_args.get_one::("builder") { - let payload_builder = - parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; - el_config.builder_url = Some(payload_builder); - - el_config.builder_user_agent = - clap_utils::parse_optional(cli_args, "builder-user-agent")?; - - el_config.builder_header_timeout = - clap_utils::parse_optional(cli_args, "builder-header-timeout")? - .map(Duration::from_millis); - } - - if cli_args.get_flag("always-prefer-builder-payload") { - warn!( - log, - "Ignoring --always-prefer-builder-payload"; - "info" => "this flag is deprecated and will be removed" - ); - } - - // Set config values from parse values. 
- el_config.secret_file = Some(secret_file.clone()); - el_config.execution_endpoint = Some(execution_endpoint.clone()); - el_config.suggested_fee_recipient = - clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; - el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; - el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; - el_config - .default_datadir - .clone_from(client_config.data_dir()); - let execution_timeout_multiplier = - clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; - el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); - - client_config.eth1.endpoint = Eth1Endpoint::Auth { - endpoint: execution_endpoint, - jwt_path: secret_file, - jwt_id: el_config.jwt_id.clone(), - jwt_version: el_config.jwt_version.clone(), - }; + // Always follow the deposit contract when there is an execution endpoint. + // + // This is wasteful for non-staking nodes as they have no need to process deposit contract + // logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or + // `--staking` flags, however that poses a risk to stakers since they cannot produce blocks + // without "eth1". + // + // The waste for non-staking nodes is relatively small so we err on the side of safety for + // stakers. The merge is already complicated enough. + client_config.sync_eth1_chain = true; + + // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. + let execution_endpoint = parse_only_one_value( + endpoints.as_str(), + SensitiveUrl::parse, + "--execution-endpoint", + log, + )?; + + // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via + // file_path or directly as string. + + let secret_file: PathBuf; + // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. 
+ if let Some(secret_files) = cli_args.get_one::("execution-jwt") { + secret_file = + parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; + + // Check if the JWT secret key is passed directly via cli flag and persist it to the default + // file location. + } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") { + use std::fs::File; + use std::io::Write; + secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); + let mut jwt_secret_key_file = File::create(secret_file.clone()) + .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; + jwt_secret_key_file + .write_all(jwt_secret_key.as_bytes()) + .map_err(|e| { + format!( + "Error occurred while writing to jwt_secret_key file: {:?}", + e + ) + })?; + } else { + return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); + } + + // Parse and set the payload builder, if any. + if let Some(endpoint) = cli_args.get_one::("builder") { + let payload_builder = + parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; + el_config.builder_url = Some(payload_builder); + + el_config.builder_user_agent = clap_utils::parse_optional(cli_args, "builder-user-agent")?; + + el_config.builder_header_timeout = + clap_utils::parse_optional(cli_args, "builder-header-timeout")? + .map(Duration::from_millis); + } + + // Set config values from parse values. 
+ el_config.secret_file = Some(secret_file.clone()); + el_config.execution_endpoint = Some(execution_endpoint.clone()); + el_config.suggested_fee_recipient = + clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; + el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; + el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; + el_config + .default_datadir + .clone_from(client_config.data_dir()); + let execution_timeout_multiplier = + clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; + el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); + + client_config.eth1.endpoint = Eth1Endpoint::Auth { + endpoint: execution_endpoint, + jwt_path: secret_file, + jwt_id: el_config.jwt_id.clone(), + jwt_version: el_config.jwt_version.clone(), + }; - // Store the EL config in the client config. - client_config.execution_layer = Some(el_config); - } + // Store the EL config in the client config. + client_config.execution_layer = Some(el_config); // 4844 params if let Some(trusted_setup) = context @@ -787,14 +765,6 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.get_flag("disable-lock-timeouts") { - warn!( - log, - "Ignoring --disable-lock-timeouts"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; @@ -894,14 +864,6 @@ pub fn get_config( client_config.network.invalid_block_storage = Some(path); } - if cli_args.get_one::("progressive-balances").is_some() { - warn!( - log, - "Progressive balances mode is deprecated"; - "info" => "please remove --progressive-balances" - ); - } - if let Some(max_workers) = clap_utils::parse_optional(cli_args, "beacon-processor-max-workers")? 
{ client_config.beacon_processor.max_workers = max_workers; @@ -1487,6 +1449,20 @@ pub fn set_network_config( Some(Default::default()) } }; + + if let Some(idontwant_message_size_threshold) = + cli_args.get_one::("idontwant-message-size-threshold") + { + config.idontwant_message_size_threshold = idontwant_message_size_threshold + .parse::() + .map_err(|_| { + format!( + "Invalid idontwant message size threshold value passed: {}", + idontwant_message_size_threshold + ) + })?; + } + Ok(()) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index cdb18b3b9cb..aac1ee26e14 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -20,7 +20,7 @@ safe_arith = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index c70ef898692..5f8ed8f5e73 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -21,7 +21,6 @@ where .try_fold(vec![], |mut ops, state_root| { let state_root = state_root?; ops.push(StoreOp::DeleteState(state_root, None)); - ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); Result::<_, Error>::Ok(ops) })?; @@ -29,7 +28,7 @@ where debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() / 2 + delete_ops.len() ); self.do_atomically_with_block_and_blobs_cache(delete_ops)?; } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index ba288039d6b..5483c490dcd 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -44,7 +44,6 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use 
types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; -use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; /// On-disk database that stores finalized states efficiently. @@ -641,15 +640,14 @@ impl, Cold: ItemStore> HotColdDB pub fn get_sync_committee_branch( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; if let Some(bytes) = self .hot_db .get_bytes(column.into(), &block_root.as_ssz_bytes())? { - let sync_committee_branch: FixedVector = - FixedVector::from_ssz_bytes(&bytes)?; + let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -677,7 +675,7 @@ impl, Cold: ItemStore> HotColdDB pub fn store_sync_committee_branch( &self, block_root: Hash256, - sync_committee_branch: &FixedVector, + sync_committee_branch: &MerkleProof, ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( @@ -1160,10 +1158,19 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteState(state_root, slot) => { + // Delete the hot state summary. let state_summary_key = get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key)); + // Delete the state temporary flag (if any). Temporary flags are commonly + // created by the state advance routine. 
+ let state_temp_key = get_key_for_col( + DBColumn::BeaconStateTemporary.into(), + state_root.as_slice(), + ); + key_value_batch.push(KeyValueStoreOp::DeleteKey(state_temp_key)); + if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { let state_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 902c440be86..1921b9b3273 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; +pub use metrics::{set_gauge, try_create_int_gauge, *}; use directory::size_of_dir; use std::path::Path; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7fb0b2f4e70..c38ee58e3b0 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -15,6 +15,7 @@ * [The `validator-manager` Command](./validator-manager.md) * [Creating validators](./validator-manager-create.md) * [Moving validators](./validator-manager-move.md) + * [Managing validators](./validator-manager-api.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Partial Withdrawals](./partial-withdrawal.md) @@ -54,13 +55,13 @@ * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) * [Blobs](./advanced-blobs.md) -* [Built-In Documentation](./help_general.md) +* [Command Line Reference (CLI)](./help_general.md) * [Beacon Node](./help_bn.md) * [Validator Client](./help_vc.md) * [Validator Manager](./help_vm.md) * [Create](./help_vm_create.md) * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 6cb66859128..80eba7a0590 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -230,7 +230,6 
@@ Example Response Body "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", - "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY": "128", "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", "MIN_GENESIS_TIME": "1614588812", "GENESIS_FORK_VERSION": "0x00001020", @@ -263,7 +262,6 @@ Example Response Body "HYSTERESIS_QUOTIENT": "4", "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", "HYSTERESIS_UPWARD_MULTIPLIER": "5", - "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", "MIN_DEPOSIT_AMOUNT": "1000000000", "MAX_EFFECTIVE_BALANCE": "32000000000", "EFFECTIVE_BALANCE_INCREMENT": "1000000000", diff --git a/book/src/cli.md b/book/src/cli.md deleted file mode 100644 index f9e7df07488..00000000000 --- a/book/src/cli.md +++ /dev/null @@ -1,55 +0,0 @@ -# Command-Line Interface (CLI) - -The `lighthouse` binary provides all necessary Ethereum consensus client functionality. It -has two primary sub-commands: - -- `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. -- `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. - -There are also some ancillary binaries like `lcli` and `account_manager`, but -these are primarily for testing. - -> **Note:** documentation sometimes uses `$ lighthouse bn` and `$ lighthouse -> vc` instead of the long-form `beacon_node` and `validator_client`. These -> commands are valid on the CLI too. - -## Installation - -Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install ---path lighthouse` from the root of the repository. See ["Configuring the -`PATH` environment variable"](https://www.rust-lang.org/tools/install) for more -information. 
- -For developers, we recommend building Lighthouse using the `$ cargo build --release ---bin lighthouse` command and executing binaries from the -`/target/release` directory. This is more ergonomic when -modifying and rebuilding regularly. - -## Documentation - -Each binary supports the `--help` flag, this is the best source of -documentation. - -```bash -lighthouse beacon_node --help -``` - -```bash -lighthouse validator_client --help -``` - -## Creating a new database/testnet - -Lighthouse should run out-of-the box and connect to the current testnet -maintained by Sigma Prime. - -However, for developers, testnets can be created by following the instructions -outlined in [testnets](./testnets.md). The steps listed here will create a -local database specified to a new testnet. - -## Resuming from an existing database - -Once a database/testnet has been created, it can be resumed by running `$ lighthouse bn`. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 733446e5d27..fa4a473ec02 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -5,7 +5,7 @@ The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides a HTTP API for querying the beacon chain and publishing messages to the network. -Usage: lighthouse beacon_node [OPTIONS] +Usage: lighthouse beacon_node [OPTIONS] --execution-endpoint Options: --auto-compact-db @@ -292,9 +292,6 @@ Options: which don't improve their payload after the first call, and high values are useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. - --progressive-balances - Deprecated. This optimisation is now the default and cannot be - disabled. --proposer-reorg-cutoff Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent failed reorgs by ensuring the @@ -329,14 +326,6 @@ Options: --quic-port6 The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. 
Defaults to `port6` + 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --self-limiter-protocols Enables the outbound rate limiter (requests made by this node).Rate limit quotas per protocol can be set in the form of @@ -390,27 +379,6 @@ Options: database. --target-peers The target number of peers. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. 
--trusted-peers One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system. @@ -445,8 +413,6 @@ Flags: incompatible with data availability checks. Checkpoint syncing is the preferred method for syncing a node. Only use this flag when testing. DO NOT use on mainnet! - --always-prefer-builder-payload - This flag is deprecated and has no effect. --always-prepare-payload Send payload attributes with every fork choice update. This is intended for use by block builders, relays and developers. You should @@ -470,8 +436,6 @@ Flags: Explicitly disables syncing of deposit logs from the execution node. This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. - --disable-duplicate-warn-logs - This flag is deprecated and has no effect. --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This @@ -479,8 +443,6 @@ Flags: boot. --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). - --disable-lock-timeouts - This flag is deprecated and has no effect. --disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/book/src/help_general.md b/book/src/help_general.md index 1c2d1266d08..996b048d10a 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,4 +1,4 @@ -# Lighthouse General Commands +# Lighthouse CLI Reference ``` Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a @@ -77,39 +77,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir

Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. -V, --version Print version @@ -122,9 +93,6 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - -l - DEPRECATED Enables environment logging giving access to sub-protocol - logs such as discv5 and libp2p --log-color Force outputting colors when emitting logs to the terminal. --logfile-compress diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 23a84919936..2cfbfbc857a 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -118,14 +118,6 @@ Options: specify nodes that are used to send beacon block proposals. 
A failure will revert back to the standard beacon nodes specified in --beacon-nodes. - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --secrets-dir The directory which contains the password to unlock the validator voting keypairs. Each password should be contained in a file where the @@ -140,27 +132,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). 
This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validator-registration-batch-size Defines the number of validators per validator/register_validator request sent to the BN. This value can be reduced to avoid timeouts @@ -210,12 +181,6 @@ Flags: If present, do not configure the system allocator. Providing this flag will generally increase memory usage, it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. Use --broadcast. By default, Lighthouse publishes - attestation, sync committee subscriptions and proposer preparation - messages to all beacon nodes provided in the `--beacon-nodes flag`. - This option changes that behaviour such that these api calls only go - out to the first available and synced beacon node --disable-slashing-protection-web3signer Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC but is only safe if slashing @@ -280,8 +245,6 @@ Flags: --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. - --produce-block-v3 - This flag is deprecated and is no longer in use. --stdin-inputs If present, read all user inputs from stdin instead of tty. --unencrypted-http-transport diff --git a/book/src/help_vm.md b/book/src/help_vm.md index f787985b215..50c204f371c 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -23,6 +23,11 @@ Commands: "create-validators" command. This command only supports validators signing via a keystore on the local file system (i.e., not Web3Signer validators). + list + Lists all validators in a validator client using the HTTP API. 
+ delete + Deletes one or more validators from a validator client using the HTTP + API. help Print this message or the help of the given subcommand(s) @@ -69,39 +74,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). 
This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-log-timestamp diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 1803bb534c6..2743117eae2 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -91,14 +91,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --suggested-fee-recipient All created validators will use this value for the suggested fee recipient. Omit this flag to use the default value from the VC. @@ -106,34 +98,15 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. 
This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-deposits When provided don't generate the deposits JSON file that is commonly used for submitting validator deposits via a web UI. Using this flag will save several seconds per validator if the user has an alternate - strategy for submitting deposits. + strategy for submitting deposits. If used, the + --force-bls-withdrawal-credentials is also required to ensure users + are aware that an --eth1-withdrawal-address is not set. --disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 0883139ad21..68aab768aed 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -5,9 +5,17 @@ Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be generated using the "create-validators" command. 
-Usage: lighthouse validator_manager import [OPTIONS] --validators-file +Usage: lighthouse validator_manager import [OPTIONS] Options: + --builder-boost-factor + When provided, the imported validator will use this percentage + multiplier to apply to the builder's payload value when choosing + between a builder payload header and payload from the local execution + node. + --builder-proposals + When provided, the imported validator will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] -d, --datadir Used to specify a custom root data directory for lighthouse keys and databases. Defaults to $HOME/.lighthouse/{network} where network is @@ -17,6 +25,10 @@ Options: Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: info, debug, trace, warn, error, crit] + --gas-limit + When provided, the imported validator will use this gas limit. It is + recommended to leave this as the default value by not specifying this + flag. --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server URLs can generally be used with @@ -26,6 +38,10 @@ Options: --genesis-state-url-timeout The timeout in seconds for the request to --genesis-state-url. [default: 180] + --keystore-file + The path to a keystore JSON file to be imported to the validator + client. This file is usually created using staking-deposit-cli or + ethstaker-deposit-cli --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] @@ -50,49 +66,27 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. + --password + Password of the keystore file. + --prefer-builder-proposals + When provided, the imported validator will always prefer blocks + constructed by builders, regardless of payload value. [possible + values: true, false] + --suggested-fee-recipient + When provided, the imported validator will use the suggested fee + recipient. Omit this flag to use the default value from the VC. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). 
This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators-file The path to a JSON file containing a list of validators to be imported to the validator client. This file is usually named "validators.json". --vc-token The file containing a token required by the validator client. --vc-url - A HTTP(S) address of a validator client using the keymanager-API. If - this value is not supplied then a 'dry run' will be conducted where no - changes are made to the validator client. [default: - http://localhost:5062] + A HTTP(S) address of a validator client using the keymanager-API. + [default: http://localhost:5062] Flags: --disable-log-timestamp diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 12dd1e91402..99eee32c782 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -74,14 +74,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --src-vc-token The file containing a token required by the source validator client. --src-vc-url @@ -95,27 +87,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. 
- --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". diff --git a/book/src/redundancy.md b/book/src/redundancy.md index ee685a17cf7..daf0eb4a5b4 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -74,8 +74,7 @@ lighthouse bn \ Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and `--import-all-attestations` flags. These flags are no longer required as the validator client will now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour -can be disabled using the `--broadcast none` flag for `lighthouse vc` (or `--disable-run-on-all` -[deprecated]). 
+can be disabled using the `--broadcast none` flag for `lighthouse vc`. ### Broadcast modes diff --git a/book/src/validator-manager-api.md b/book/src/validator-manager-api.md new file mode 100644 index 00000000000..a5fc69fd5ad --- /dev/null +++ b/book/src/validator-manager-api.md @@ -0,0 +1,39 @@ +# Managing Validators + +The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. This requires the validator client running with the flag `--http`. + +## Delete + +The `delete` command deletes one or more validators from the validator client. It will also modify the `validator_definitions.yml` file automatically so there is no manual action required from the user after the delete. To `delete`: + +```bash +lighthouse vm delete --vc-token --validators pubkey1,pubkey2 +``` + +Example: + +```bash +lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4,0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f +``` + +## Import + +The `import` command imports validator keystores generated by the staking-deposit-cli/ethstaker-deposit-cli. 
To import a validator keystore: + +```bash +lighthouse vm import --vc-token --keystore-file /path/to/json --password keystore_password +``` + +Example: + +``` +lighthouse vm import --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --keystore-file keystore.json --password keystore_password +``` + +## List + +To list the validators running on the validator client: + +```bash +lighthouse vm list --vc-token ~/.lighthouse/mainnet/validators/api-token.txt +``` diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md index d97f953fc19..b4c86dc6da8 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator-manager-create.md @@ -69,6 +69,8 @@ lighthouse \ > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. +> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator-manager-api.md#import). + ## Detailed Guide This guide will create two validators and import them to a VC. 
For simplicity, diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 2c8bbbf4b4e..c1fa621abb1 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -9,13 +9,16 @@ use eth2_wallet::{ use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; use serde::{Deserialize, Serialize}; -use std::fs::{self, File}; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::str::from_utf8; use std::thread::sleep; use std::time::Duration; +use std::{ + fs::{self, File}, + str::FromStr, +}; use zeroize::Zeroize; pub mod validator_definitions; @@ -215,6 +218,14 @@ pub fn mnemonic_from_phrase(phrase: &str) -> Result { #[serde(transparent)] pub struct ZeroizeString(String); +impl FromStr for ZeroizeString { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(Self(s.to_owned())) + } +} + impl From for ZeroizeString { fn from(s: String) -> Self { Self(s) diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index cba7399c9bf..a4b5f4dc1c4 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,6 +1,5 @@ //! A helper library for parsing values from `clap::ArgMatches`. -use alloy_primitives::U256 as Uint256; use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; @@ -30,38 +29,9 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result(cli_args, "terminal-total-difficulty-override")? - { - let stripped = string.replace(',', ""); - let terminal_total_difficulty = Uint256::from_str(&stripped).map_err(|e| { - format!( - "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", - e - ) - })?; - - eth2_network_config.config.terminal_total_difficulty = terminal_total_difficulty; - } - - if let Some(hash) = parse_optional(cli_args, "terminal-block-hash-override")? 
{ - eth2_network_config.config.terminal_block_hash = hash; - } - - if let Some(epoch) = parse_optional(cli_args, "terminal-block-hash-epoch-override")? { - eth2_network_config - .config - .terminal_block_hash_activation_epoch = epoch; - } - - if let Some(slots) = parse_optional(cli_args, "safe-slots-to-import-optimistically")? { - eth2_network_config - .config - .safe_slots_to_import_optimistically = slots; - } - Ok(eth2_network_config) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e1550fdee24..c187399ebd7 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1678,27 +1678,23 @@ impl FullBlockContents { bytes: &[u8], fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - .map(|block| FullBlockContents::Block(block)) - } - ForkName::Deneb | ForkName::Electra => { - let mut builder = ssz::SszDecoderBuilder::new(bytes); + if fork_name.deneb_enabled() { + let mut builder = ssz::SszDecoderBuilder::new(bytes); - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - builder.register_type::>()?; + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; - let mut decoder = builder.build()?; - let block = decoder.decode_next_with(|bytes| { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - })?; - let kzg_proofs = decoder.decode_next()?; - let blobs = decoder.decode_next()?; + let mut decoder = builder.build()?; + let block = decoder + .decode_next_with(|bytes| BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name))?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; - Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) - } + Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) + } else { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + 
.map(|block| FullBlockContents::Block(block)) } } @@ -1738,15 +1734,14 @@ impl ForkVersionDeserialize for FullBlockContents { value: serde_json::value::Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - Ok(FullBlockContents::Block( - BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, - )) - } - ForkName::Deneb | ForkName::Electra => Ok(FullBlockContents::BlockContents( + if fork_name.deneb_enabled() { + Ok(FullBlockContents::BlockContents( BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, - )), + )) + } else { + Ok(FullBlockContents::Block( + BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + )) } } } @@ -1838,28 +1833,25 @@ impl PublishBlockRequest { /// SSZ decode with fork variant determined by `fork_name`. pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { + if fork_name.deneb_enabled() { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + let block = decoder.decode_next_with(|bytes| { SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - .map(|block| PublishBlockRequest::Block(Arc::new(block))) - } - ForkName::Deneb | ForkName::Electra => { - let mut builder = ssz::SszDecoderBuilder::new(bytes); - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - let block = decoder.decode_next_with(|bytes| { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - })?; - let kzg_proofs = decoder.decode_next()?; - let blobs = decoder.decode_next()?; - Ok(PublishBlockRequest::new( - Arc::new(block), - Some((kzg_proofs, blobs)), - )) - } + })?; + 
let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; + Ok(PublishBlockRequest::new( + Arc::new(block), + Some((kzg_proofs, blobs)), + )) + } else { + SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + .map(|block| PublishBlockRequest::Block(Arc::new(block))) } } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 74fca4c5010..1eca01bbeef 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -140,4 +140,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 07bd21b35c2..500555a2694 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 67f1e5b6831..d67d77d3bea 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -127,4 +127,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of 
file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index acf4d83f323..18591fecdcd 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -149,4 +149,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 8b84d870103..b08a6180bf0 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml deleted file mode 100644 index fe966f4a9c6..00000000000 --- a/common/lighthouse_metrics/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "lighthouse_metrics" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -prometheus = "0.13.0" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index cac6d073f25..73cbdf44d42 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -9,7 +9,7 @@ test_logger = [] # Print log output to stderr when running tests 
instead of drop [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index a4a1acabd48..4bb37392984 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,6 +1,4 @@ -use lighthouse_metrics::{ - inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, -}; +use metrics::{inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult}; use slog::Logger; use slog_term::Decorator; use std::io::{Result, Write}; @@ -217,6 +215,19 @@ impl TimeLatch { } pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { + let mut tracing_log_path = PathBuf::new(); + + // Ensure that `tracing_log_path` only contains directories. + for p in base_tracing_log_path.iter() { + tracing_log_path = tracing_log_path.join(p); + if let Ok(metadata) = tracing_log_path.metadata() { + if !metadata.is_dir() { + tracing_log_path.pop(); + break; + } + } + } + let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env() .or_else(|_| tracing_subscriber::EnvFilter::try_new("warn")) { @@ -232,7 +243,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { .max_log_files(2) .filename_prefix("libp2p") .filename_suffix("log") - .build(base_tracing_log_path.clone()) + .build(tracing_log_path.clone()) else { eprintln!("Failed to initialize libp2p rolling file appender"); return; @@ -243,7 +254,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { .max_log_files(2) .filename_prefix("discv5") .filename_suffix("log") - .build(base_tracing_log_path.clone()) + .build(tracing_log_path) else { eprintln!("Failed to initialize discv5 rolling file appender"); return; diff --git a/common/logging/src/tracing_metrics_layer.rs 
b/common/logging/src/tracing_metrics_layer.rs index 89a1f4d1f16..5d272adbf59 100644 --- a/common/logging/src/tracing_metrics_layer.rs +++ b/common/logging/src/tracing_metrics_layer.rs @@ -1,6 +1,5 @@ //! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. -use lighthouse_metrics as metrics; use std::sync::LazyLock; use tracing_log::NormalizeEvent; diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index b91e68c518e..79a07eed166 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } libc = "0.2.79" parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 41d8d28291d..30313d06723 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -4,7 +4,7 @@ //! https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html //! //! These functions are generally only suitable for Linux systems. -use lighthouse_metrics::*; +use metrics::*; use parking_lot::Mutex; use std::env; use std::os::raw::c_int; @@ -38,60 +38,57 @@ pub static GLOBAL_LOCK: LazyLock> = LazyLock::new(|| <_>::default()); // Metrics for the malloc. For more information, see: // // https://man7.org/linux/man-pages/man3/mallinfo.3.html -pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_arena", "The total amount of memory allocated by means other than mmap(2). 
\ This figure includes both in-use blocks and blocks on the free list.", ) }); -pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_ordblks", "The number of ordinary (i.e., non-fastbin) free blocks.", ) }); -pub static MALLINFO_SMBLKS: LazyLock> = +pub static MALLINFO_SMBLKS: LazyLock> = LazyLock::new(|| try_create_int_gauge("mallinfo_smblks", "The number of fastbin free blocks.")); -pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblks", "The number of blocks currently allocated using mmap.", ) }); -pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblkhd", "The number of bytes in blocks currently allocated using mmap.", ) }); -pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_fsmblks", "The total number of bytes in fastbin free blocks.", ) }); -pub static MALLINFO_UORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_uordblks", - "The total number of bytes used by in-use allocations.", - ) - }); -pub static MALLINFO_FORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_fordblks", - "The total number of bytes in free blocks.", - ) - }); -pub static MALLINFO_KEEPCOST: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_keepcost", - "The total amount of releasable free space at the top of the heap..", - ) - }); +pub static MALLINFO_UORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_uordblks", + "The total number of bytes used by in-use allocations.", + ) +}); +pub static MALLINFO_FORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_fordblks", + "The 
total number of bytes in free blocks.", + ) +}); +pub static MALLINFO_KEEPCOST: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_keepcost", + "The total amount of releasable free space at the top of the heap..", + ) +}); /// Calls `mallinfo` and updates Prometheus metrics with the results. pub fn scrape_mallinfo_metrics() { diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index a392a74e8f1..0e2e00cb0ef 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -7,7 +7,7 @@ //! //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. -use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; +use metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; @@ -15,22 +15,22 @@ use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; // Metrics for jemalloc. 
-pub static NUM_ARENAS: LazyLock> = +pub static NUM_ARENAS: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use")); -pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { +pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated") }); -pub static BYTES_ACTIVE: LazyLock> = +pub static BYTES_ACTIVE: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active")); -pub static BYTES_MAPPED: LazyLock> = +pub static BYTES_MAPPED: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped")); -pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { +pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata") }); -pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { +pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident") }); -pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { +pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained") }); diff --git a/common/metrics/Cargo.toml b/common/metrics/Cargo.toml new file mode 100644 index 00000000000..a7f4f4b967e --- /dev/null +++ b/common/metrics/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "metrics" +version = "0.2.0" +edition = { workspace = true } + +[dependencies] +prometheus = { workspace = true } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/metrics/src/lib.rs similarity index 99% rename from common/lighthouse_metrics/src/lib.rs rename to common/metrics/src/lib.rs index 2a1e99defaf..1f2ac71aea5 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/metrics/src/lib.rs @@ -20,10 +20,10 @@ //! ## Example //! //! ```rust -//! 
use lighthouse_metrics::*; +//! use metrics::*; //! use std::sync::LazyLock; //! -//! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`. +//! // These metrics are "magically" linked to the global registry defined in `metrics`. //! pub static RUN_COUNT: LazyLock> = LazyLock::new(|| try_create_int_counter( //! "runs_total", //! "Total number of runs" diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 55f18edd526..2da32c307ee 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -14,7 +14,7 @@ eth2 = { workspace = true } serde_json = { workspace = true } serde = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } slog = { workspace = true } store = { workspace = true } regex = { workspace = true } diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index e157d82c11b..2f6c820f562 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,5 +1,5 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; -use lighthouse_metrics::{MetricFamily, MetricType}; +use metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; use std::path::Path; @@ -155,7 +155,7 @@ fn get_value(mf: &MetricFamily) -> Option { /// Collects all metrics and returns a `serde_json::Value` object with the required metrics /// from the metrics hashmap. 
pub fn gather_metrics(metrics_map: &HashMap) -> Option { - let metric_families = lighthouse_metrics::gather(); + let metric_families = metrics::gather(); let mut res = serde_json::Map::with_capacity(metrics_map.len()); for mf in metric_families.iter() { let metric_name = mf.get_name(); diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index 13bcf006a9e..c2f330cd507 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -6,5 +6,5 @@ edition = { workspace = true } [dependencies] types = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/common/slot_clock/src/metrics.rs b/common/slot_clock/src/metrics.rs index 24023c9ed75..ec95e90d4af 100644 --- a/common/slot_clock/src/metrics.rs +++ b/common/slot_clock/src/metrics.rs @@ -1,5 +1,5 @@ use crate::SlotClock; -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::{EthSpec, Slot}; diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 7928d4a3c97..26bcd7b339c 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -4,11 +4,17 @@ version = "0.1.0" authors = ["Sigma Prime "] edition = { workspace = true } +[features] +default = ["slog"] +slog = ["dep:slog", "dep:sloggers", "dep:logging"] +tracing = ["dep:tracing"] + [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } +slog = { workspace = true, optional = true } futures = { workspace = true } -lighthouse_metrics = { workspace = true } -sloggers = { workspace = true } -logging = { workspace = true } +metrics = { workspace = true } +sloggers = { workspace = true, optional = true } +logging = { workspace = true, optional = true } +tracing = { workspace = true, optional = true } diff --git 
a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index d6edfd3121c..92ddb7c0be2 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,14 +1,20 @@ mod metrics; +#[cfg(not(feature = "tracing"))] pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; -use slog::{debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; pub use tokio::task::JoinHandle; +// Set up logging framework +#[cfg(not(feature = "tracing"))] +use slog::{debug, o}; +#[cfg(feature = "tracing")] +use tracing::debug; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -79,7 +85,7 @@ pub struct TaskExecutor { /// /// The task must provide a reason for shutting down. signal_tx: Sender, - + #[cfg(not(feature = "tracing"))] log: slog::Logger, } @@ -94,18 +100,20 @@ impl TaskExecutor { pub fn new>( handle: T, exit: async_channel::Receiver<()>, - log: slog::Logger, + #[cfg(not(feature = "tracing"))] log: slog::Logger, signal_tx: Sender, ) -> Self { Self { handle_provider: handle.into(), exit, signal_tx, + #[cfg(not(feature = "tracing"))] log, } } /// Clones the task executor adding a service name. + #[cfg(not(feature = "tracing"))] pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { handle_provider: self.handle_provider.clone(), @@ -115,6 +123,16 @@ impl TaskExecutor { } } + /// Clones the task executor adding a service name. + #[cfg(feature = "tracing")] + pub fn clone(&self) -> Self { + TaskExecutor { + handle_provider: self.handle_provider.clone(), + exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), + } + } + /// A convenience wrapper for `Self::spawn` which ignores a `Result` as long as both `Ok`/`Err` /// are of type `()`. /// @@ -150,10 +168,13 @@ impl TaskExecutor { drop(timer); }); } else { + #[cfg(not(feature = "tracing"))] debug!( self.log, "Couldn't spawn monitor task. 
Runtime shutting down" - ) + ); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn monitor task. Runtime shutting down"); } } @@ -175,7 +196,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. This function does not wrap the task in an `async-channel::Receiver` /// like [spawn](#method.spawn). /// The caller of this function is responsible for wrapping up the task with an `async-channel::Receiver` to - /// ensure that the task gets canceled appropriately. + /// ensure that the task gets cancelled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. /// /// This is useful in cases where the future to be spawned needs to do additional cleanup work when @@ -197,7 +218,10 @@ impl TaskExecutor { if let Some(handle) = self.handle() { handle.spawn(future); } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); } } } @@ -215,7 +239,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. - /// The task is canceled when the corresponding async-channel is dropped. + /// The task is cancelled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. 
pub fn spawn_handle( @@ -224,6 +248,8 @@ impl TaskExecutor { name: &'static str, ) -> Option>> { let exit = self.exit(); + + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { @@ -234,12 +260,12 @@ impl TaskExecutor { Some(handle.spawn(async move { futures::pin_mut!(exit); let result = match future::select(Box::pin(task), exit).await { - future::Either::Left((value, _)) => { - trace!(log, "Async task completed"; "task" => name); - Some(value) - } + future::Either::Left((value, _)) => Some(value), future::Either::Right(_) => { + #[cfg(not(feature = "tracing"))] debug!(log, "Async task shutdown, exit received"; "task" => name); + #[cfg(feature = "tracing")] + debug!(task = name, "Async task shutdown, exit received"); None } }; @@ -247,7 +273,10 @@ impl TaskExecutor { result })) } else { - debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(not(feature = "tracing"))] + debug!(log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); None } } else { @@ -270,6 +299,7 @@ impl TaskExecutor { F: FnOnce() -> R + Send + 'static, R: Send + 'static, { + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); @@ -278,19 +308,22 @@ impl TaskExecutor { let join_handle = if let Some(handle) = self.handle() { handle.spawn_blocking(task) } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. 
Runtime shutting down"); return None; }; let future = async move { let result = match join_handle.await { - Ok(result) => { - trace!(log, "Blocking task completed"; "task" => name); - Ok(result) - } - Err(e) => { - debug!(log, "Blocking task ended unexpectedly"; "error" => %e); - Err(e) + Ok(result) => Ok(result), + Err(error) => { + #[cfg(not(feature = "tracing"))] + debug!(log, "Blocking task ended unexpectedly"; "error" => %error); + #[cfg(feature = "tracing")] + debug!(%error, "Blocking task ended unexpectedly"); + Err(error) } }; drop(timer); @@ -321,32 +354,48 @@ impl TaskExecutor { ) -> Option { let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let handle = self.handle()?; let exit = self.exit(); - + #[cfg(not(feature = "tracing"))] debug!( log, "Starting block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!(name, "Starting block_on task"); + handle.block_on(async { let output = tokio::select! { output = future => { + #[cfg(not(feature = "tracing"))] debug!( log, "Completed block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!( + name, + "Completed block_on task" + ); Some(output) }, _ = exit => { + #[cfg(not(feature = "tracing"))] debug!( log, "Cancelled block_on task"; "name" => name, ); + #[cfg(feature = "tracing")] + debug!( + name, + "Cancelled block_on task" + ); None } }; @@ -376,6 +425,7 @@ impl TaskExecutor { } /// Returns a reference to the logger. 
+ #[cfg(not(feature = "tracing"))] pub fn log(&self) -> &slog::Logger { &self.log } diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index a40bfdf4e72..bd4d6a50b9e 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -1,5 +1,5 @@ /// Handles async task metrics -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static ASYNC_TASKS_COUNT: LazyLock> = LazyLock::new(|| { diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 84f5ce5f189..a9407c392d9 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -17,6 +17,6 @@ serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } headers = "0.3.2" -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } serde_array_query = "0.1.0" bytes = { workspace = true } diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 505d2775833..fabcf936507 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -1,5 +1,5 @@ use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use lighthouse_metrics::*; +use metrics::*; use std::sync::LazyLock; pub static PROCESS_NUM_THREADS: LazyLock> = LazyLock::new(|| { diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 4a4f6e9086a..b32e0aa6656 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -12,7 +12,7 @@ state_processing = { workspace = true } proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } slog = { workspace = true } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ca59a6adfb6..85704042df4 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ 
b/consensus/fork_choice/src/fork_choice.rs @@ -1300,43 +1300,6 @@ where } } - /// Returns `Ok(false)` if a block is not viable to be imported optimistically. - /// - /// ## Notes - /// - /// Equivalent to the function with the same name in the optimistic sync specs: - /// - /// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#helpers - pub fn is_optimistic_candidate_block( - &self, - current_slot: Slot, - block_slot: Slot, - block_parent_root: &Hash256, - spec: &ChainSpec, - ) -> Result> { - // If the block is sufficiently old, import it. - if block_slot + spec.safe_slots_to_import_optimistically <= current_slot { - return Ok(true); - } - - // If the parent block has execution enabled, always import the block. - // - // See: - // - // https://github.com/ethereum/consensus-specs/pull/2844 - if self - .proto_array - .get_block(block_parent_root) - .map_or(false, |parent| { - parent.execution_status.is_execution_enabled() - }) - { - return Ok(true); - } - - Ok(false) - } - /// Return the current finalized checkpoint. pub fn finalized_checkpoint(&self) -> Checkpoint { *self.fc_store.finalized_checkpoint() diff --git a/consensus/fork_choice/src/metrics.rs b/consensus/fork_choice/src/metrics.rs index eb0dbf435e3..b5cda2f5871 100644 --- a/consensus/fork_choice/src/metrics.rs +++ b/consensus/fork_choice/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::EthSpec; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ce19d68203e..29265e34e4d 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -256,36 +256,6 @@ impl ForkChoiceTest { self } - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - /// - /// If the chain is presently in an unsafe period, transition through it and the following safe - /// period. 
- /// - /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed - /// from the fork choice spec in Q1 2023. We're still leaving references to - /// it in our tests because (a) it's easier and (b) it allows us to easily - /// test for the absence of that parameter. - pub fn move_to_next_unsafe_period(self) -> Self { - self.move_inside_safe_to_update() - .move_outside_safe_to_update() - } - - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_outside_safe_to_update(self) -> Self { - while is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - - /// Moves to the next slot that is *inside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_inside_safe_to_update(self) -> Self { - while !is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. @@ -516,10 +486,6 @@ impl ForkChoiceTest { } } -fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { - slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified -} - #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); @@ -536,15 +502,13 @@ fn justified_and_finalized_blocks() { assert!(fork_choice.get_finalized_block().is_ok()); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` +/// - The new justified checkpoint descends from the current. Near genesis. 
#[tokio::test] -async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent_first_justification() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() - .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) .await @@ -552,77 +516,29 @@ async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { } /// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) .await .unwrap() - .move_outside_safe_to_update() .assert_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - This is the first justification since genesis -#[tokio::test] -async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .move_to_next_unsafe_period() - .assert_justified_epoch(0) - .apply_blocks(1) - .await - .assert_justified_epoch(2); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has **not** increased. 
-#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_inside_safe_to_update() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should not change. - state.finalized_checkpoint().epoch = Epoch::new(0); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - .assert_justified_epoch(3); -} - /// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. #[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +async fn justified_checkpoint_updates_with_non_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() .apply_blocks(1) .await - .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. @@ -636,36 +552,6 @@ async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_wit .unwrap(); }) .await - // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new - // block should have updated the justified checkpoint. - .assert_justified_epoch(3); -} - -/// - The new justified checkpoint **does not** descend from the current. 
-/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should change. - state.finalized_checkpoint_mut().epoch = Epoch::new(1); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await .assert_justified_epoch(3); } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 7b7c6eb0c48..b7f6ef7b2a9 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -25,7 +25,7 @@ ethereum_hashing = { workspace = true } int_to_bytes = { workspace = true } smallvec = { workspace = true } arbitrary = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } derivative = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } rand = { workspace = true } diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index af843b3acbc..101e8616835 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -4,7 +4,7 @@ use crate::metrics::{ 
PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, }; use crate::{BlockProcessingError, EpochProcessingError}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use types::{ is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EpochTotalBalances, EthSpec, ParticipationFlags, ProgressiveBalancesCache, Validator, diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index b0eaf3422d3..0c176d4ab14 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -147,6 +147,8 @@ impl ConsensusContext { } } + #[allow(unknown_lints)] + #[allow(elided_named_lifetimes)] pub fn get_indexed_attestation<'a>( &'a mut self, state: &BeaconState, diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index e6fe483776f..b53dee96d93 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e7655b453a8..f289b6e0817 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -579,8 +579,7 @@ pub fn get_expected_withdrawals( .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, amount: balance.safe_sub( - validator - .get_validator_max_effective_balance(spec, state.fork_name_unchecked()), + validator.get_max_effective_balance(spec, state.fork_name_unchecked()), )?, }); withdrawal_index.safe_add_assign(1)?; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 
74166f67130..a53dc15126f 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -40,15 +40,13 @@ pub fn process_operations>( if state.fork_name_unchecked().electra_enabled() { state.update_pubkey_cache()?; - if let Some(deposit_requests) = block_body.execution_payload()?.deposit_requests()? { - process_deposit_requests(state, &deposit_requests, spec)?; - } - if let Some(withdrawal_requests) = block_body.execution_payload()?.withdrawal_requests()? { - process_withdrawal_requests(state, &withdrawal_requests, spec)?; - } - if let Some(consolidations) = block_body.execution_payload()?.consolidation_requests()? { - process_consolidation_requests(state, &consolidations, spec)?; - } + process_deposit_requests(state, &block_body.execution_requests()?.deposits, spec)?; + process_withdrawal_requests(state, &block_body.execution_requests()?.withdrawals, spec)?; + process_consolidation_requests( + state, + &block_body.execution_requests()?.consolidations, + spec, + )?; } Ok(()) @@ -477,50 +475,13 @@ pub fn apply_deposit( return Ok(()); } - let new_validator_index = state.validators().len(); - - // [Modified in Electra:EIP7251] - let (effective_balance, state_balance) = if state.fork_name_unchecked() >= ForkName::Electra - { - (0, 0) - } else { - ( - std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - amount, - ) - }; - // Create a new validator. 
- let validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance, - slashed: false, - }; - state.validators_mut().push(validator)?; - state.balances_mut().push(state_balance)?; - - // Altair or later initializations. - if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() { - previous_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(current_epoch_participation) = state.current_epoch_participation_mut() { - current_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(inactivity_scores) = state.inactivity_scores_mut() { - inactivity_scores.push(0)?; - } + state.add_validator_to_registry(&deposit_data, spec)?; + let new_validator_index = state.validators().len().safe_sub(1)? 
as u64; // [New in Electra:EIP7251] if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index as u64, + index: new_validator_index, amount, })?; } diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 51f45b87e80..fcb480a37cf 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -82,6 +82,7 @@ struct RewardsAndPenaltiesContext { struct SlashingsContext { adjusted_total_slashing_balance: u64, target_withdrawable_epoch: Epoch, + penalty_per_effective_balance_increment: u64, } struct PendingBalanceDepositsContext { @@ -775,9 +776,16 @@ impl SlashingsContext { .current_epoch .safe_add(E::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?; + let penalty_per_effective_balance_increment = adjusted_total_slashing_balance.safe_div( + state_ctxt + .total_active_balance + .safe_div(spec.effective_balance_increment)?, + )?; + Ok(Self { adjusted_total_slashing_balance, target_withdrawable_epoch, + penalty_per_effective_balance_increment, }) } } @@ -792,14 +800,20 @@ fn process_single_slashing( if validator.slashed && slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; - let penalty_numerator = validator - .effective_balance - .safe_div(increment)? - .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; - let penalty = penalty_numerator - .safe_div(state_ctxt.total_active_balance)? - .safe_mul(increment)?; - + let penalty = if state_ctxt.fork_name.electra_enabled() { + let effective_balance_increments = validator.effective_balance.safe_div(increment)?; + slashings_ctxt + .penalty_per_effective_balance_increment + .safe_mul(effective_balance_increments)? 
+ } else { + let penalty_numerator = validator + .effective_balance + .safe_div(increment)? + .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; + penalty_numerator + .safe_div(state_ctxt.total_active_balance)? + .safe_mul(increment)? + }; *balance.make_mut()? = balance.saturating_sub(penalty); } Ok(()) @@ -1022,8 +1036,7 @@ fn process_single_effective_balance_update( ) -> Result<(), Error> { // Use the higher effective balance limit if post-Electra and compounding withdrawal credentials // are set. - let effective_balance_limit = - validator.get_validator_max_effective_balance(spec, state_ctxt.fork_name); + let effective_balance_limit = validator.get_max_effective_balance(spec, state_ctxt.fork_name); let old_effective_balance = validator.effective_balance; let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index aff0225edd4..dac83e7553f 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -18,4 +18,3 @@ fixed_bytes = { workspace = true } [features] arbitrary = ["alloy-primitives/arbitrary"] -getrandom = ["alloy-primitives/getrandom"] diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index b49a26cc373..3e93974fe0f 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -45,7 +45,7 @@ impl Buf { /// Hash the entire buffer. 
fn hash(&self) -> Hash256 { - Hash256::from_slice(&hash_fixed(&self.0)) + Hash256::from(hash_fixed(&self.0)) } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index c1559a407cf..21a15fc5174 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,7 +9,7 @@ name = "benches" harness = false [dependencies] -alloy-primitives = { workspace = true, features = ["rlp", "getrandom"] } +alloy-primitives = { workspace = true } merkle_proof = { workspace = true } bls = { workspace = true, features = ["arbitrary"] } kzg = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index effc6a21068..0c8bf36c813 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -78,7 +78,7 @@ fn all_benches(c: &mut Criterion) { || (bytes.clone(), spec.clone()), |(bytes, spec)| { let state: BeaconState = - BeaconState::from_ssz_bytes(&bytes, &spec).expect("should decode"); + BeaconState::from_ssz_bytes(bytes, spec).expect("should decode"); black_box(state) }, BatchSize::SmallInput, diff --git a/consensus/types/presets/gnosis/phase0.yaml b/consensus/types/presets/gnosis/phase0.yaml index 87c73e6fb7a..48129cb47ea 100644 --- a/consensus/types/presets/gnosis/phase0.yaml +++ b/consensus/types/presets/gnosis/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 89bb97d6a87..02bc96c8cdb 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 
-# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index c9c81325f1b..1f756031421 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**1 (= 1) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 4a6816c024d..a2983035138 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -670,6 +670,7 @@ impl> BeaconBlockElectra graffiti: Graffiti::default(), execution_payload: Payload::Electra::default(), blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), }, } } @@ -700,6 +701,7 @@ impl> EmptyBlock for BeaconBlockElec execution_payload: Payload::Electra::default(), bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), }, } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 305ef105445..c81e7bcde93 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -114,6 +114,8 @@ pub struct BeaconBlockBody = FullPay VariableList, #[superstruct(only(Deneb, Electra))] pub blob_kzg_commitments: KzgCommitments, + #[superstruct(only(Electra))] + pub execution_requests: ExecutionRequests, 
#[superstruct(only(Base, Altair))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] @@ -662,6 +664,7 @@ impl From>> execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, } = body; ( @@ -680,6 +683,7 @@ impl From>> }, bls_to_execution_changes, blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests, }, Some(execution_payload), ) @@ -818,6 +822,7 @@ impl BeaconBlockBodyElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, } = self; BeaconBlockBodyElectra { @@ -835,6 +840,7 @@ impl BeaconBlockBodyElectra> { }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests: execution_requests.clone(), } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a08f6d720c7..f214991d516 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1548,6 +1548,35 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + pub fn add_validator_to_registry( + &mut self, + deposit_data: &DepositData, + spec: &ChainSpec, + ) -> Result<(), Error> { + let fork = self.fork_name_unchecked(); + let amount = if fork.electra_enabled() { + 0 + } else { + deposit_data.amount + }; + self.validators_mut() + .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + self.balances_mut().push(amount)?; + + // Altair or later initializations. 
+ if let Ok(previous_epoch_participation) = self.previous_epoch_participation_mut() { + previous_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(current_epoch_participation) = self.current_epoch_participation_mut() { + current_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(inactivity_scores) = self.inactivity_scores_mut() { + inactivity_scores.push(0)?; + } + + Ok(()) + } + /// Safe copy-on-write accessor for the `validators` list. pub fn get_validator_cow( &mut self, @@ -2131,7 +2160,7 @@ impl BeaconState { let max_effective_balance = self .validators() .get(validator_index) - .map(|validator| validator.get_validator_max_effective_balance(spec, current_fork)) + .map(|validator| validator.get_max_effective_balance(spec, current_fork)) .ok_or(Error::UnknownValidator(validator_index))?; Ok(std::cmp::min( *self @@ -2477,33 +2506,64 @@ impl BeaconState { Ok(()) } - pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { - // 1. Convert generalized index to field index. - let field_index = match generalized_index { + pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX - | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { - // Sync committees are top-level fields, subtract off the generalized indices - // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee - // in the `BeaconState`: - // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - light_client_update::FINALIZED_ROOT_INDEX => { - // Finalized root is the right child of `finalized_checkpoint`, divide by two to get - // the generalized index of `state.finalized_checkpoint`. - let finalized_checkpoint_generalized_index = generalized_index / 2; - // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches - // position of `finalized_checkpoint` in `BeaconState`. - finalized_checkpoint_generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - _ => return Err(Error::IndexNotSupported(generalized_index)), }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } - // 2. Get all `BeaconState` leaves. + pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } + + pub fn compute_finalized_root_proof(&self) -> Result, Error> { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. 
+ let field_index = if self.fork_name_unchecked().electra_enabled() { + // Index should be 169/2 - 64 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + } else { + // Index should be 105/2 - 32 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + let mut proof = self.generate_proof(field_index, &leaves)?; + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + Ok(proof) + } + + fn generate_proof( + &self, + field_index: usize, + leaves: &[Hash256], + ) -> Result, Error> { + let depth = self.num_fields_pow2().ilog2() as usize; + let tree = merkle_proof::MerkleTree::create(leaves, depth); + let (_, proof) = tree.generate_proof(field_index, depth)?; + Ok(proof) + } + + fn get_beacon_state_leaves(&self) -> Vec { let mut leaves = vec![]; #[allow(clippy::arithmetic_side_effects)] match self { @@ -2539,18 +2599,7 @@ impl BeaconState { } }; - // 3. Make deposit tree. - // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). - let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; - let tree = merkle_proof::MerkleTree::create(&leaves, depth); - let (_, mut proof) = tree.generate_proof(field_index, depth)?; - - // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. 
- if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { - proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); - } - - Ok(proof) + leaves } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index e31427121ec..1c4effb4aec 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -26,7 +26,6 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, - Consolidation, ApplicationMask(ApplicationDomain), } @@ -111,12 +110,10 @@ pub struct ChainSpec { pub(crate) domain_voluntary_exit: u32, pub(crate) domain_selection_proof: u32, pub(crate) domain_aggregate_and_proof: u32, - pub(crate) domain_consolidation: u32, /* * Fork choice */ - pub safe_slots_to_update_justified: u64, pub proposer_score_boost: Option, pub reorg_head_weight_threshold: Option, pub reorg_parent_weight_threshold: Option, @@ -159,7 +156,6 @@ pub struct ChainSpec { pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, - pub safe_slots_to_import_optimistically: u64, /* * Capella hard fork params @@ -198,6 +194,7 @@ pub struct ChainSpec { pub custody_requirement: u64, pub data_column_sidecar_subnet_count: u64, pub number_of_columns: usize, + pub samples_per_slot: u64, /* * Networking @@ -478,7 +475,6 @@ impl ChainSpec { Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, - Domain::Consolidation => self.domain_consolidation, } } @@ -703,12 +699,10 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_consolidation: 0x0B, /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), 
reorg_parent_weight_threshold: Some(160), @@ -759,7 +753,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -811,6 +804,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific @@ -888,7 +882,6 @@ impl ChainSpec { inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"), min_slashing_penalty_quotient: 64, proportional_slashing_multiplier: 2, - safe_slots_to_update_justified: 2, // Altair epochs_per_sync_committee_period: Epoch::new(8), altair_fork_version: [0x01, 0x00, 0x00, 0x01], @@ -1024,12 +1017,10 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_consolidation: 0x0B, /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -1080,7 +1071,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -1132,6 +1122,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific */ @@ -1214,9 +1205,6 @@ pub struct Config { pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_import_optimistically: u64, #[serde(with = 
"serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, @@ -1382,6 +1370,9 @@ pub struct Config { #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, + #[serde(default = "default_samples_per_slot")] + #[serde(with = "serde_utils::quoted_u64")] + samples_per_slot: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1424,10 +1415,6 @@ fn default_terminal_block_hash_activation_epoch() -> Epoch { Epoch::new(u64::MAX) } -fn default_safe_slots_to_import_optimistically() -> u64 { - 128u64 -} - fn default_subnets_per_node() -> u8 { 2u8 } @@ -1521,17 +1508,21 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { } const fn default_custody_requirement() -> u64 { - 1 + 4 } const fn default_data_column_sidecar_subnet_count() -> u64 { - 32 + 128 } const fn default_number_of_columns() -> u64 { 128 } +const fn default_samples_per_slot() -> u64 { + 8 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -1644,7 +1635,6 @@ impl Config { terminal_total_difficulty: spec.terminal_total_difficulty, terminal_block_hash: spec.terminal_block_hash, terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically: spec.safe_slots_to_import_optimistically, min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, @@ -1727,6 +1717,7 @@ impl Config { custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, number_of_columns: spec.number_of_columns as u64, + samples_per_slot: spec.samples_per_slot, } } @@ -1745,7 +1736,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, min_genesis_active_validator_count, 
min_genesis_time, genesis_fork_version, @@ -1802,6 +1792,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns, + samples_per_slot, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -1844,7 +1835,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -1881,6 +1871,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns: number_of_columns as usize, + samples_per_slot, ..chain_spec.clone() }) @@ -1946,7 +1937,6 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); - test_domain(Domain::Consolidation, spec.domain_consolidation, &spec); // The builder domain index is zero let builder_domain_pre_mask = [0; 4]; @@ -2096,7 +2086,6 @@ mod yaml_tests { #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 - #SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY: 2 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 MIN_GENESIS_TIME: 1606824000 GENESIS_FORK_VERSION: 0x00000000 @@ -2125,6 +2114,7 @@ mod yaml_tests { CUSTODY_REQUIREMENT: 1 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 + SAMPLES_PER_SLOT: 8 "#; let chain_spec: Config = serde_yaml::from_str(spec).unwrap(); @@ -2144,7 +2134,6 @@ mod yaml_tests { check_default!(terminal_total_difficulty); check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); - check_default!(safe_slots_to_import_optimistically); check_default!(bellatrix_fork_version); check_default!(gossip_max_size); check_default!(min_epochs_for_block_requests); diff --git a/consensus/types/src/config_and_preset.rs 
b/consensus/types/src/config_and_preset.rs index 110392d4b77..c80d678b2a3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -41,6 +41,7 @@ pub struct ConfigAndPreset { } impl ConfigAndPreset { + // DEPRECATED: the `fork_name` argument is never used, we should remove it. pub fn from_chain_spec(spec: &ChainSpec, fork_name: Option) -> Self { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); @@ -126,7 +127,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), "full_exit_request_amount".to_uppercase() => spec.full_exit_request_amount.to_string().into(), - "domain_consolidation".to_uppercase()=> u32_hex(spec.domain_consolidation), } } diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation_request.rs index b21f34e7bba..e2df0bb9726 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation_request.rs @@ -1,5 +1,6 @@ use crate::{test_utils::TestRandom, Address, PublicKeyBytes, SignedRoot}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -24,6 +25,18 @@ pub struct ConsolidationRequest { pub target_pubkey: PublicKeyBytes, } +impl ConsolidationRequest { + pub fn max_size() -> usize { + Self { + source_address: Address::repeat_byte(0), + source_pubkey: PublicKeyBytes::empty(), + target_pubkey: PublicKeyBytes::empty(), + } + .as_ssz_bytes() + .len() + } +} + impl SignedRoot for ConsolidationRequest {} #[cfg(test)] diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index f6ddf8b63a8..7af949fef3a 100644 --- 
a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Hash256, PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -29,6 +30,20 @@ pub struct DepositRequest { pub index: u64, } +impl DepositRequest { + pub fn max_size() -> usize { + Self { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::ZERO, + amount: 0, + signature: Signature::empty(), + index: 0, + } + .as_ssz_bytes() + .len() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 4d41d568308..9f16b676a6a 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -13,12 +13,6 @@ pub type Transactions = VariableList< >; pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; -pub type DepositRequests = - VariableList::MaxDepositRequestsPerPayload>; -pub type WithdrawalRequests = - VariableList::MaxWithdrawalRequestsPerPayload>; -pub type ConsolidationRequests = - VariableList::MaxConsolidationRequestsPerPayload>; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra), @@ -96,13 +90,6 @@ pub struct ExecutionPayload { #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, - #[superstruct(only(Electra))] - pub deposit_requests: VariableList, - #[superstruct(only(Electra))] - pub withdrawal_requests: VariableList, - #[superstruct(only(Electra))] - pub consolidation_requests: - VariableList, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 90dd8c54e21..e9690435f1f 100644 --- a/consensus/types/src/execution_payload_header.rs +++ 
b/consensus/types/src/execution_payload_header.rs @@ -86,12 +86,6 @@ pub struct ExecutionPayloadHeader { #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, - #[superstruct(only(Electra), partial_getter(copy))] - pub deposit_requests_root: Hash256, - #[superstruct(only(Electra), partial_getter(copy))] - pub withdrawal_requests_root: Hash256, - #[superstruct(only(Electra), partial_getter(copy))] - pub consolidation_requests_root: Hash256, } impl ExecutionPayloadHeader { @@ -214,9 +208,6 @@ impl ExecutionPayloadHeaderDeneb { withdrawals_root: self.withdrawals_root, blob_gas_used: self.blob_gas_used, excess_blob_gas: self.excess_blob_gas, - deposit_requests_root: Hash256::zero(), - withdrawal_requests_root: Hash256::zero(), - consolidation_requests_root: Hash256::zero(), } } } @@ -308,9 +299,6 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe withdrawals_root: payload.withdrawals.tree_hash_root(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests_root: payload.deposit_requests.tree_hash_root(), - withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(), - consolidation_requests_root: payload.consolidation_requests.tree_hash_root(), } } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs new file mode 100644 index 00000000000..778260dd841 --- /dev/null +++ b/consensus/types/src/execution_requests.rs @@ -0,0 +1,59 @@ +use crate::test_utils::TestRandom; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest}; +use alloy_primitives::Bytes; +use derivative::Derivative; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +pub type DepositRequests = + VariableList::MaxDepositRequestsPerPayload>; 
+pub type WithdrawalRequests = + VariableList::MaxWithdrawalRequestsPerPayload>; +pub type ConsolidationRequests = + VariableList::MaxConsolidationRequestsPerPayload>; + +#[derive( + arbitrary::Arbitrary, + Debug, + Derivative, + Default, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +#[serde(bound = "E: EthSpec")] +#[arbitrary(bound = "E: EthSpec")] +#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +pub struct ExecutionRequests { + pub deposits: DepositRequests, + pub withdrawals: WithdrawalRequests, + pub consolidations: ConsolidationRequests, +} + +impl ExecutionRequests { + /// Returns the encoding according to EIP-7685 to send + /// to the execution layer over the engine api. + pub fn get_execution_requests_list(&self) -> Vec { + let deposit_bytes = Bytes::from(self.deposits.as_ssz_bytes()); + let withdrawal_bytes = Bytes::from(self.withdrawals.as_ssz_bytes()); + let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); + vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] + } +} + +#[cfg(test)] +mod tests { + use crate::MainnetEthSpec; + + use super::*; + + ssz_and_tree_hash_tests!(ExecutionRequests); +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 281a84d8592..eff52378342 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -81,6 +81,7 @@ pub mod slot_epoch_macros; pub mod activation_queue; pub mod config_and_preset; pub mod execution_block_header; +pub mod execution_requests; pub mod fork_context; pub mod participation_flags; pub mod payload; @@ -169,6 +170,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; +pub use crate::execution_requests::ExecutionRequests; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -198,7 +200,7 @@ pub use 
crate::light_client_optimistic_update::{ }; pub use crate::light_client_update::{ Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof, }; pub use crate::participation_flags::ParticipationFlags; pub use crate::payload::{ diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 7c716e6bb2d..21a7e5416f2 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -57,7 +57,16 @@ pub struct LightClientBootstrap { /// The `SyncCommittee` used in the requested period. pub current_sync_committee: Arc>, /// Merkle proof for sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "current_sync_committee_branch_altair") + )] pub current_sync_committee_branch: FixedVector, + #[superstruct( + only(Electra), + partial_getter(rename = "current_sync_committee_branch_electra") + )] + pub current_sync_committee_branch: FixedVector, } impl LightClientBootstrap { @@ -115,7 +124,7 @@ impl LightClientBootstrap { pub fn new( block: &SignedBlindedBeaconBlock, current_sync_committee: Arc>, - current_sync_committee_branch: FixedVector, + current_sync_committee_branch: Vec, chain_spec: &ChainSpec, ) -> Result { let light_client_bootstrap = match block @@ -126,22 +135,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - 
current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -155,9 +164,7 @@ impl LightClientBootstrap { ) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = beacon_state.update_tree_hash_cache()?; - let current_sync_committee_branch = - FixedVector::new(beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?)?; - + let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block @@ -168,22 +175,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: 
current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -196,21 +203,42 @@ impl ForkVersionDeserialize for LightClientBootstrap { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( + if fork_name.altair_enabled() { + Ok(serde_json::from_value::>(value) + .map_err(serde::de::Error::custom))? + } else { + Err(serde::de::Error::custom(format!( "LightClientBootstrap failed to deserialize: unsupported fork '{}'", fork_name - ))), - _ => Ok(serde_json::from_value::>(value) - .map_err(serde::de::Error::custom))?, + ))) } } } #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientBootstrapAltair, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientBootstrapCapella, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapCapella); + } - ssz_tests!(LightClientBootstrapDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientBootstrapDeneb, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientBootstrapElectra, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapElectra); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index dc7561f5fcc..ba2f2083cd9 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -63,8 +63,13 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub 
finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. - #[test_random(default)] + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FixedVector, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -75,7 +80,7 @@ impl LightClientFinalityUpdate { pub fn new( attested_block: &SignedBlindedBeaconBlock, finalized_block: &SignedBlindedBeaconBlock, - finality_branch: FixedVector, + finality_branch: Vec, sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, @@ -92,7 +97,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }) @@ -104,7 +109,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -115,7 +120,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -126,7 +131,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -212,23 +217,42 @@ impl ForkVersionDeserialize for LightClientFinalityUpdate { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( + if fork_name.altair_enabled() { + 
serde_json::from_value::>(value) + .map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom(format!( "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", fork_name - ))), - _ => Ok( - serde_json::from_value::>(value) - .map_err(serde::de::Error::custom), - )?, + ))) } } } #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientFinalityUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientFinalityUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateCapella); + } - ssz_tests!(LightClientFinalityUpdateDeneb); + #[cfg(test)] + mod deneb { + use crate::{LightClientFinalityUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateElectra); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index c0de114b357..52800f18ac2 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -129,11 +129,10 @@ impl LightClientHeader { } pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { - match fork_name { - ForkName::Base | ForkName::Altair => 0, - ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) - } + if fork_name.capella_enabled() { + ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) + } else { + 0 } } } @@ -308,3 +307,31 @@ impl ForkVersionDeserialize for LightClientHeader { } } } + +#[cfg(test)] +mod tests { + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientHeaderAltair, 
MainnetEthSpec}; + ssz_tests!(LightClientHeaderAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientHeaderCapella, MainnetEthSpec}; + ssz_tests!(LightClientHeaderCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientHeaderDeneb, MainnetEthSpec}; + ssz_tests!(LightClientHeaderDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientHeaderElectra, MainnetEthSpec}; + ssz_tests!(LightClientHeaderElectra); + } +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 3cae31edf80..209388af87b 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -198,23 +198,44 @@ impl ForkVersionDeserialize for LightClientOptimisticUpdate { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( - "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", - fork_name - ))), - _ => Ok( + if fork_name.altair_enabled() { + Ok( serde_json::from_value::>(value) .map_err(serde::de::Error::custom), - )?, + )? 
+ } else { + Err(serde::de::Error::custom(format!( + "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", + fork_name + ))) } } } #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientOptimisticUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateAltair); + } - ssz_tests!(LightClientOptimisticUpdateDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientOptimisticUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientOptimisticUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateElectra); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 3b48a68df31..a7ddf8eb314 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,5 +1,6 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; +use crate::LightClientHeader; use crate::{ beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, @@ -10,10 +11,10 @@ use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; -use ssz::Decode; +use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6}; +use ssz_types::typenum::{U4, U5, U6, U7}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -24,20 +25,39 @@ pub const 
CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; pub const EXECUTION_PAYLOAD_INDEX: usize = 25; +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; pub type ExecutionPayloadProofLen = U4; - pub type NextSyncCommitteeProofLen = U5; +pub type FinalizedRootProofLenElectra = U7; +pub type CurrentSyncCommitteeProofLenElectra = U6; +pub type NextSyncCommitteeProofLenElectra = U6; + pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub type MerkleProof = Vec; +// Max light client updates by range request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; + type FinalityBranch = FixedVector; +type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type NextSyncCommitteeBranchElectra = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -119,8 +139,17 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderElectra, /// The `SyncCommittee` used in the next period. 
pub next_sync_committee: Arc>, - /// Merkle proof for next sync committee + // Merkle proof for next sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "next_sync_committee_branch_altair") + )] pub next_sync_committee_branch: NextSyncCommitteeBranch, + #[superstruct( + only(Electra), + partial_getter(rename = "next_sync_committee_branch_electra") + )] + pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -131,7 +160,13 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FinalityBranch, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -160,8 +195,8 @@ impl LightClientUpdate { sync_aggregate: &SyncAggregate, block_slot: Slot, next_sync_committee: Arc>, - next_sync_committee_branch: FixedVector, - finality_branch: FixedVector, + next_sync_committee_branch: Vec, + finality_branch: Vec, attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, @@ -184,9 +219,9 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), 
signature_slot: block_slot, }) @@ -204,9 +239,9 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -224,9 +259,9 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -244,9 +279,9 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -386,23 +421,54 @@ impl LightClientUpdate { return Ok(new.signature_slot() < self.signature_slot()); } - fn is_next_sync_committee_branch_empty(&self) -> bool { - for index in self.next_sync_committee_branch().iter() { - if *index != Hash256::default() { - return false; - } + fn is_next_sync_committee_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.next_sync_committee_branch.as_ref()) + }) + } + + pub fn is_finality_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.finality_branch.as_ref()) + }) + } + + // A `LightClientUpdate` has two `LightClientHeader`s + // Spec: 
https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate + #[allow(clippy::arithmetic_side_effects)] + pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { + let fixed_len = match fork_name { + ForkName::Base | ForkName::Bellatrix => 0, + ForkName::Altair => as Encode>::ssz_fixed_len(), + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) + } + + pub fn map_with_fork_name(&self, func: F) -> R + where + F: Fn(ForkName) -> R, + { + match self { + Self::Altair(_) => func(ForkName::Altair), + Self::Capella(_) => func(ForkName::Capella), + Self::Deneb(_) => func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), } - true } +} - pub fn is_finality_branch_empty(&self) -> bool { - for index in self.finality_branch().iter() { - if *index != Hash256::default() { - return false; - } +fn is_empty_branch(branch: &[Hash256]) -> bool { + for index in branch.iter() { + if *index != Hash256::default() { + return false; } - true } + true } fn compute_sync_committee_period_at_slot( @@ -416,16 +482,53 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; use ssz_types::typenum::Unsigned; - ssz_tests!(LightClientUpdateDeneb); + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateAltair); + } + + #[cfg(test)] + mod capella { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateElectra); + } #[test] fn 
finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32) <= FINALIZED_ROOT_INDEX_ELECTRA + ); + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32 + 1) > FINALIZED_ROOT_INDEX_ELECTRA + ); + assert_eq!( + FinalizedRootProofLenElectra::to_usize(), + FINALIZED_ROOT_PROOF_LEN_ELECTRA + ); } #[test] @@ -440,6 +543,19 @@ mod tests { CurrentSyncCommitteeProofLen::to_usize(), CURRENT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + CurrentSyncCommitteeProofLenElectra::to_usize(), + CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } #[test] @@ -450,5 +566,18 @@ mod tests { NextSyncCommitteeProofLen::to_usize(), NEXT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + NextSyncCommitteeProofLenElectra::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index cee8b8cc219..80a70c171f5 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -39,18 +39,6 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + /// fork-specific fields fn withdrawals_root(&self) -> Result; fn blob_gas_used(&self) -> Result; - fn withdrawal_requests( - &self, - ) -> Result>, Error>; - fn deposit_requests( - &self, - ) -> Result>, Error>; - fn 
consolidation_requests( - &self, - ) -> Result< - Option>, - Error, - >; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -290,51 +278,6 @@ impl ExecPayload for FullPayload { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.withdrawal_requests.clone())) - } - } - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.deposit_requests.clone())) - } - } - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.consolidation_requests.clone())) - } - } - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -467,51 +410,6 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.withdrawal_requests.clone())) - } - } - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => 
Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.deposit_requests.clone())) - } - } - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.consolidation_requests.clone())) - } - } - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -692,30 +590,6 @@ impl ExecPayload for BlindedPayload { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - Ok(None) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - Ok(None) - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - Ok(None) - } - fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -817,30 +691,6 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - Ok(None) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - Ok(None) - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - Ok(None) - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'b _, self, move |payload, cons| { cons(payload); @@ -867,10 +717,7 @@ macro_rules! impl_exec_payload_common { $is_default_with_empty_roots:block, $f:block, $g:block, - $h:block, - $i:block, - $j:block, - $k:block) => { + $h:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -933,30 +780,6 @@ macro_rules! 
impl_exec_payload_common { let h = $h; h(self) } - - fn withdrawal_requests( - &self, - ) -> Result< - Option>, - Error, - > { - let i = $i; - i(self) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - let j = $j; - j(self) - } - - fn consolidation_requests( - &self, - ) -> Result::MaxConsolidationRequestsPerPayload>>, Error> { - let k = $k; - k(self) - } } impl From<$wrapped_type> for $wrapper_type { @@ -1002,10 +825,7 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - }, - { |_| { Ok(None) } }, - { |_| { Ok(None) } }, - { |_| { Ok(None) } } + } ); impl TryInto<$wrapper_type_header> for BlindedPayload { @@ -1092,47 +912,6 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option>, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawal_requests() - }; - c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option>, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.deposit_requests() - }; - c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option< - VariableList< - ConsolidationRequest, - ::MaxConsolidationRequestsPerPayload, - >, - >, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.consolidation_requests() - }; - c } ); diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 2c576ed332c..435a74bdc35 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -27,8 +27,6 @@ pub struct BasePreset { #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_update_justified: u64, - 
#[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, @@ -90,7 +88,6 @@ impl BasePreset { hysteresis_quotient: spec.hysteresis_quotient, hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - safe_slots_to_update_justified: spec.safe_slots_to_update_justified, min_deposit_amount: spec.min_deposit_amount, max_effective_balance: spec.max_effective_balance, effective_balance_increment: spec.effective_balance_increment, diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index af4ee87c158..8290876fa1f 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -13,7 +13,7 @@ use std::slice::SliceIndex; /// ## Example /// /// ``` -/// use ssz_types::{RuntimeVariableList}; +/// use types::{RuntimeVariableList}; /// /// let base: Vec = vec![1, 2, 3, 4]; /// diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4d3279a7f77..b52adcfe412 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -498,6 +498,7 @@ impl SignedBeaconBlockElectra> { execution_payload: BlindedPayloadElectra { .. 
}, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, }, }, signature, @@ -521,6 +522,7 @@ impl SignedBeaconBlockElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, }, }, signature, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 3c6037e23e3..8cf118eea59 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -35,6 +35,34 @@ pub struct Validator { } impl Validator { + #[allow(clippy::arithmetic_side_effects)] + pub fn from_deposit( + deposit_data: &DepositData, + amount: u64, + fork_name: ForkName, + spec: &ChainSpec, + ) -> Self { + let mut validator = Validator { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: 0, + slashed: false, + }; + + let max_effective_balance = validator.get_max_effective_balance(spec, fork_name); + // safe math is unnecessary here since the spec.effecive_balance_increment is never <= 0 + validator.effective_balance = std::cmp::min( + amount - (amount % spec.effective_balance_increment), + max_effective_balance, + ); + + validator + } + /// Returns `true` if the validator is considered active at some epoch. 
pub fn is_active_at(&self, epoch: Epoch) -> bool { self.activation_epoch <= epoch && epoch < self.exit_epoch @@ -236,7 +264,7 @@ impl Validator { spec: &ChainSpec, current_fork: ForkName, ) -> bool { - let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + let max_effective_balance = self.get_max_effective_balance(spec, current_fork); let has_max_effective_balance = self.effective_balance == max_effective_balance; let has_excess_balance = balance > max_effective_balance; self.has_execution_withdrawal_credential(spec) @@ -251,11 +279,7 @@ impl Validator { } /// Returns the max effective balance for a validator in gwei. - pub fn get_validator_max_effective_balance( - &self, - spec: &ChainSpec, - current_fork: ForkName, - ) -> u64 { + pub fn get_max_effective_balance(&self, spec: &ChainSpec, current_fork: ForkName) -> u64 { if current_fork >= ForkName::Electra { if self.has_compounding_withdrawal_credential(spec) { spec.max_effective_balance_electra @@ -273,7 +297,7 @@ impl Validator { spec: &ChainSpec, current_fork: ForkName, ) -> u64 { - let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + let max_effective_balance = self.get_max_effective_balance(spec, current_fork); std::cmp::min(validator_balance, max_effective_balance) } } diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 4c7c16757ed..153506f47aa 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -41,12 +41,11 @@ impl VoluntaryExit { spec: &ChainSpec, ) -> SignedVoluntaryExit { let fork_name = spec.fork_name_at_epoch(self.epoch); - let fork_version = match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - spec.fork_version_for_name(fork_name) - } + let fork_version = if fork_name.deneb_enabled() { // EIP-7044 - ForkName::Deneb | ForkName::Electra => spec.fork_version_for_name(ForkName::Capella), + 
spec.fork_version_for_name(ForkName::Capella) + } else { + spec.fork_version_for_name(fork_name) }; let domain = spec.compute_domain(Domain::VoluntaryExit, fork_version, genesis_validators_root); diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal_request.rs index b6db0efb26d..1296426ac05 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal_request.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Address, PublicKeyBytes}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -27,6 +28,18 @@ pub struct WithdrawalRequest { pub amount: u64, } +impl WithdrawalRequest { + pub fn max_size() -> usize { + Self { + source_address: Address::repeat_byte(0), + validator_pubkey: PublicKeyBytes::empty(), + amount: 0, + } + .as_ssz_bytes() + .len() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 50f5f4e7795..234e624698e 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -8,7 +8,7 @@ pub fn bench_init_context(c: &mut Criterion) { .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); - c.bench_function(&format!("Initialize context rust_eth_kzg"), |b| { + c.bench_function("Initialize context rust_eth_kzg", |b| { b.iter(|| { let trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); DASContext::new( @@ -19,7 +19,7 @@ pub fn bench_init_context(c: &mut Criterion) { ) }) }); - c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { + c.bench_function("Initialize context c-kzg (4844)", |b| { b.iter(|| { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 7c37aa6d67d..1125697c7ca 100644 --- 
a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -49,7 +49,7 @@ clap_utils = { workspace = true } eth2_network_config = { workspace = true } lighthouse_version = { workspace = true } account_utils = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 9ad40a6acd4..89d759d6629 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -333,7 +333,7 @@ impl EnvironmentBuilder { eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. - self.eth2_config.spec = Arc::new(eth2_network_config.chain_spec::()?); + self.eth2_config.spec = eth2_network_config.chain_spec::()?.into(); self.eth2_network_config = Some(eth2_network_config); Ok(self) diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 84e8274f06e..34e42a61f67 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -102,4 +102,5 @@ ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index e865fbd272e..e33e4cb9b81 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -115,16 +115,6 @@ fn main() { .global(true) .display_order(0), ) - .arg( - Arg::new("env_log") - .short('l') - .help( - "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", - ) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("logfile") 
.long("logfile") @@ -333,57 +323,43 @@ fn main() { Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") - .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ - Accepts a 256-bit decimal integer (not a hex value). \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal difficulty. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-epoch-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. 
Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") - .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override this parameter in the event \ - of an attack at the PoS transition block. Incorrect use of this flag can cause your \ - node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ - this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("genesis-state-url") @@ -626,20 +602,6 @@ fn run( })); } - let mut tracing_log_path: Option = clap_utils::parse_optional(matches, "logfile")?; - - if tracing_log_path.is_none() { - tracing_log_path = Some( - parse_path_or_default(matches, "datadir")? - .join(DEFAULT_BEACON_NODE_DIR) - .join("logs"), - ) - } - - let path = tracing_log_path.clone().unwrap(); - - logging::create_tracing_layer(path); - // Allow Prometheus to export the time at which the process was started. metrics::expose_process_start_time(&log); @@ -655,6 +617,20 @@ fn run( ); } + // Warn for DEPRECATED global flags. This code should be removed when we finish deleting these + // flags. + let deprecated_flags = [ + "terminal-total-difficulty-override", + "terminal-block-hash-override", + "terminal-block-hash-epoch-override", + "safe-slots-to-import-optimistically", + ]; + for flag in deprecated_flags { + if matches.get_one::(flag).is_some() { + slog::warn!(log, "The {} flag is deprecated and does nothing", flag); + } + } + // Note: the current code technically allows for starting a beacon node _and_ a validator // client at the same time. 
// @@ -724,6 +700,21 @@ fn run( return Ok(()); } + let mut tracing_log_path: Option = + clap_utils::parse_optional(matches, "logfile")?; + + if tracing_log_path.is_none() { + tracing_log_path = Some( + parse_path_or_default(matches, "datadir")? + .join(DEFAULT_BEACON_NODE_DIR) + .join("logs"), + ) + } + + let path = tracing_log_path.clone().unwrap(); + + logging::create_tracing_layer(path); + executor.clone().spawn( async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { diff --git a/lighthouse/src/metrics.rs b/lighthouse/src/metrics.rs index 0002b43e7b9..30e0120582a 100644 --- a/lighthouse/src/metrics.rs +++ b/lighthouse/src/metrics.rs @@ -1,5 +1,5 @@ -pub use lighthouse_metrics::*; use lighthouse_version::VERSION; +pub use metrics::*; use slog::{error, Logger}; use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f3832a1a1e5..ffa6e300a75 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -21,10 +21,12 @@ use std::string::ToString; use std::time::Duration; use tempfile::TempDir; use types::non_zero_usize::new_non_zero_usize; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; +use types::{Address, Checkpoint, Epoch, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; -const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; +const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/"; +const DEFAULT_EXECUTION_JWT_SECRET_KEY: &str = + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; // These dummy ports should ONLY be used for `enr-xxx-port` flags that do not bind. 
const DUMMY_ENR_TCP_PORT: u16 = 7777; @@ -52,6 +54,18 @@ struct CommandLineTest { } impl CommandLineTest { fn new() -> CommandLineTest { + let mut base_cmd = base_cmd(); + + base_cmd + .arg("--execution-endpoint") + .arg(DEFAULT_EXECUTION_ENDPOINT) + .arg("--execution-jwt-secret-key") + .arg(DEFAULT_EXECUTION_JWT_SECRET_KEY); + CommandLineTest { cmd: base_cmd } + } + + // Required for testing different JWT authentication methods. + fn new_with_no_execution_endpoint() -> CommandLineTest { let base_cmd = base_cmd(); CommandLineTest { cmd: base_cmd } } @@ -104,7 +118,7 @@ fn staking_flag() { assert!(config.sync_eth1_chain); assert_eq!( config.eth1.endpoint.get_endpoint().to_string(), - DEFAULT_ETH1_ENDPOINT + DEFAULT_EXECUTION_ENDPOINT ); }); } @@ -159,13 +173,6 @@ fn max_skip_slots_flag() { .with_config(|config| assert_eq!(config.chain.import_max_skip_slots, Some(10))); } -#[test] -fn disable_lock_timeouts_flag() { - CommandLineTest::new() - .flag("disable-lock-timeouts", None) - .run_with_zero_port(); -} - #[test] fn shuffling_cache_default() { CommandLineTest::new() @@ -260,7 +267,7 @@ fn always_prepare_payload_default() { #[test] fn always_prepare_payload_override() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("always-prepare-payload", None) .flag( "suggested-fee-recipient", @@ -466,7 +473,7 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { // this is way better but intersperse is still a nightly feature :/ // let endpoint_arg: String = urls.into_iter().intersperse(",").collect(); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag(flag, Some(&endpoint_arg)) .flag("execution-jwt", Some(&jwts_arg)) .run_with_zero_port() @@ -487,7 +494,7 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { #[test] fn run_execution_jwt_secret_key_is_persisted() { let jwt_secret_key = 
"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33"; - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some("http://localhost:8551/")) .flag("execution-jwt-secret-key", Some(jwt_secret_key)) .run_with_zero_port() @@ -508,7 +515,7 @@ fn run_execution_jwt_secret_key_is_persisted() { #[test] fn execution_timeout_multiplier_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", @@ -535,7 +542,7 @@ fn bellatrix_jwt_secrets_flag() { let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") .expect("Unable to write to file"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "jwt-secrets", @@ -557,7 +564,7 @@ fn bellatrix_jwt_secrets_flag() { #[test] fn bellatrix_fee_recipient_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", @@ -598,7 +605,7 @@ fn run_payload_builder_flag_test_with_config( f: F, ) { let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut test = CommandLineTest::new(); + let mut test = CommandLineTest::new_with_no_execution_endpoint(); test.flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", @@ -720,7 +727,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let jwt_file = "jwt-file"; let id = "bn-1"; let version = "Lighthouse-v2.1.3"; - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() 
.flag("execution-endpoint", Some(execution_endpoint)) .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) .flag(jwt_id_flag, Some(id)) @@ -749,16 +756,14 @@ fn jwt_optional_flags() { fn jwt_optional_alias_flags() { run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); } +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn terminal_total_difficulty_override_flag() { - use beacon_node::beacon_chain::types::Uint256; CommandLineTest::new() .flag("terminal-total-difficulty-override", Some("1337424242")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.terminal_total_difficulty, Uint256::from(1337424242)) - }); + .run_with_zero_port(); } +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn terminal_block_hash_and_activation_epoch_override_flags() { CommandLineTest::new() @@ -767,43 +772,14 @@ fn terminal_block_hash_and_activation_epoch_override_flags() { "terminal-block-hash-override", Some("0x4242424242424242424242424242424242424242424242424242424242424242"), ) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!( - spec.terminal_block_hash, - ExecutionBlockHash::from_str( - "0x4242424242424242424242424242424242424242424242424242424242424242" - ) - .unwrap() - ); - assert_eq!(spec.terminal_block_hash_activation_epoch, 1337); - }); -} -#[test] -#[should_panic] -fn terminal_block_hash_missing_activation_epoch() { - CommandLineTest::new() - .flag( - "terminal-block-hash-override", - Some("0x4242424242424242424242424242424242424242424242424242424242424242"), - ) - .run_with_zero_port(); -} -#[test] -#[should_panic] -fn epoch_override_missing_terminal_block_hash() { - CommandLineTest::new() - .flag("terminal-block-hash-epoch-override", Some("1337")) .run_with_zero_port(); } +// DEPRECATED. This flag is deprecated but should not cause a crash. 
#[test] fn safe_slots_to_import_optimistically_flag() { CommandLineTest::new() .flag("safe-slots-to-import-optimistically", Some("421337")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.safe_slots_to_import_optimistically, 421337) - }); + .run_with_zero_port(); } // Tests for Network flags. @@ -1612,19 +1588,6 @@ fn http_port_flag() { .run() .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } -#[test] -fn empty_self_limiter_flag() { - // Test that empty rate limiter is accepted using the default rate limiting configurations. - CommandLineTest::new() - .flag("self-limiter", None) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.network.outbound_rate_limiter_config, - Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) - ) - }); -} #[test] fn empty_inbound_rate_limiter_flag() { @@ -1667,14 +1630,6 @@ fn http_allow_origin_all_flag() { .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } -#[test] -fn http_allow_sync_stalled_flag() { - CommandLineTest::new() - .flag("http", None) - .flag("http-allow-sync-stalled", None) - .run_with_zero_port(); -} - #[test] fn http_enable_beacon_processor() { CommandLineTest::new() @@ -1713,22 +1668,6 @@ fn http_tls_flags() { }); } -#[test] -fn http_spec_fork_default() { - CommandLineTest::new() - .flag("http", None) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); -} - -#[test] -fn http_spec_fork_override() { - CommandLineTest::new() - .flag("http", None) - .flag("http-spec-fork", Some("altair")) - .run_with_zero_port(); -} - // Tests for Metrics flags. 
#[test] fn metrics_flag() { @@ -2505,13 +2444,13 @@ fn logfile_format_flag() { fn sync_eth1_chain_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); + .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); } #[test] fn sync_eth1_chain_execution_endpoints_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "execution-jwt", @@ -2524,7 +2463,7 @@ fn sync_eth1_chain_execution_endpoints_flag() { #[test] fn sync_eth1_chain_disable_deposit_contract_sync_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("disable-deposit-contract-sync", None) .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( @@ -2631,14 +2570,6 @@ fn invalid_gossip_verified_blocks_path() { }); } -#[test] -fn progressive_balances_checked() { - // Flag is deprecated but supplying it should not crash until we remove it completely. 
- CommandLineTest::new() - .flag("progressive-balances", Some("checked")) - .run_with_zero_port(); -} - #[test] fn beacon_processor() { CommandLineTest::new() diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 9d6453908c8..5379912c131 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -140,11 +140,6 @@ impl CompletedTest { func(&self.config); } - pub fn with_spec(self, func: F) { - let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); - func(spec); - } - pub fn with_config_and_dir(self, func: F) { func(&self.config, &self.dir); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index baf50aa7c07..f5947d41278 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -426,13 +426,6 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } -#[test] -fn produce_block_v3_flag() { - // The flag is DEPRECATED but providing it should not trigger an error. - // We can delete this test when deleting the flag entirely. 
- CommandLineTest::new().flag("produce-block-v3", None).run(); -} - #[test] fn no_gas_limit_flag() { CommandLineTest::new() @@ -513,24 +506,6 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } -#[test] -fn disable_run_on_all_flag() { - CommandLineTest::new() - .flag("disable-run-on-all", None) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![]); - }); - // --broadcast flag takes precedence - CommandLineTest::new() - .flag("disable-run-on-all", None) - .flag("broadcast", Some("attestations")) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![ApiTopic::Attestations]); - }); -} - #[test] fn no_broadcast_flag() { CommandLineTest::new().run().with_config(|config| { @@ -623,16 +598,6 @@ fn disable_latency_measurement_service() { assert!(!config.enable_latency_measurement_service); }); } -#[test] -fn latency_measurement_service() { - // This flag is DEPRECATED so has no effect, but should still be accepted. 
- CommandLineTest::new() - .flag("latency-measurement-service", Some("false")) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); -} #[test] fn validator_registration_batch_size() { diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index bca6a18ab56..999f3c31415 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -9,7 +9,9 @@ use tempfile::{tempdir, TempDir}; use types::*; use validator_manager::{ create_validators::CreateConfig, + delete_validators::DeleteConfig, import_validators::ImportConfig, + list_validators::ListConfig, move_validators::{MoveConfig, PasswordSource, Validators}, }; @@ -105,6 +107,18 @@ impl CommandLineTest { } } +impl CommandLineTest { + fn validators_list() -> Self { + Self::default().flag("list", None) + } +} + +impl CommandLineTest { + fn validators_delete() -> Self { + Self::default().flag("delete", None) + } +} + #[test] pub fn validator_create_without_output_path() { CommandLineTest::validators_create().assert_failed(); @@ -199,10 +213,18 @@ pub fn validator_import_defaults() { .flag("--vc-token", Some("./token.json")) .assert_success(|config| { let expected = ImportConfig { - validators_file_path: PathBuf::from("./vals.json"), + validators_file_path: Some(PathBuf::from("./vals.json")), + keystore_file_path: None, vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), ignore_duplicates: false, + password: None, + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }; assert_eq!(expected, config); }); @@ -216,10 +238,18 @@ pub fn validator_import_misc_flags() { .flag("--ignore-duplicates", None) .assert_success(|config| { let expected = ImportConfig { - validators_file_path: PathBuf::from("./vals.json"), + validators_file_path: 
Some(PathBuf::from("./vals.json")), + keystore_file_path: None, vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), ignore_duplicates: true, + password: None, + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }; assert_eq!(expected, config); }); @@ -233,7 +263,17 @@ pub fn validator_import_missing_token() { } #[test] -pub fn validator_import_missing_validators_file() { +pub fn validator_import_using_both_file_flags() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .flag("--validators-file", Some("./vals.json")) + .flag("--keystore-file", Some("./keystore.json")) + .flag("--password", Some("abcd")) + .assert_failed(); +} + +#[test] +pub fn validator_import_missing_both_file_flags() { CommandLineTest::validators_import() .flag("--vc-token", Some("./token.json")) .assert_failed(); @@ -394,3 +434,37 @@ pub fn validator_move_count() { assert_eq!(expected, config); }); } + +#[test] +pub fn validator_list_defaults() { + CommandLineTest::validators_list() + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = ListConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_delete_defaults() { + CommandLineTest::validators_delete() + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = DeleteConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + validators_to_delete: vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ], + }; + assert_eq!(expected, 
config); + }); +} diff --git a/scripts/cli.sh b/scripts/cli.sh index 6ca019b39e9..ef4ed158ad8 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -16,7 +16,7 @@ write_to_file() { printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" # Adjust the width of the help text and append to the end of file - sed -i -e '$a\'$'\n''\n''' "$file" + printf "\n\n%s\n" "" >> "$file" } CMD=./target/release/lighthouse @@ -40,7 +40,7 @@ vm_import=./help_vm_import.md vm_move=./help_vm_move.md # create .md files -write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$general_cli" "$general" "Lighthouse CLI Reference" write_to_file "$bn_cli" "$bn" "Beacon Node" write_to_file "$vc_cli" "$vc" "Validator Client" write_to_file "$vm_cli" "$vm" "Validator Manager" diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 0275cb217f8..ca701eb7e91 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `sudo apt install yq -y`. +1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. 
## Starting the testnet @@ -82,4 +82,4 @@ The script comes with some CLI options, which can be viewed with `./start_local_ ```bash ./start_local_testnet.sh -b false -``` \ No newline at end of file +``` diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index f90132764e4..1f156886931 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -7,7 +7,7 @@ set -Eeuo pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" ENCLAVE_NAME=local-testnet NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml -ETHEREUM_PKG_VERSION=4.2.0 +ETHEREUM_PKG_VERSION=main BUILD_IMAGE=true BUILDER_PROPOSALS=false diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index 5500f8d5a04..6af1989e9ff 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -12,4 +12,5 @@ kurtosis enclave dump $ENCLAVE_NAME $LOGS_SUBDIR echo "Local testnet logs stored to $LOGS_SUBDIR." kurtosis enclave rm -f $ENCLAVE_NAME +kurtosis engine stop echo "Local testnet stopped." 
diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index d74b0ac062a..56a023df0bb 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -18,7 +18,7 @@ derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } filesystem = { workspace = true } lru = { workspace = true } parking_lot = { workspace = true } @@ -37,7 +37,7 @@ mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -redb = { version = "2.1", optional = true } +redb = { version = "2.1.4", optional = true } [dev-dependencies] maplit = { workspace = true } diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs index 6c5b62a44fd..12bef711484 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -164,13 +164,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let first = table - .iter()? - .next() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let first = table.first()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = first { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); Ok(self.current_key.clone()) } else { @@ -182,13 +178,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let last = table - .iter()? 
- .next_back() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let last = table.last()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = last { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); return Ok(self.current_key.clone()); } diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index 2e49bd4aeba..cfeec2d74ed 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SLASHER_DATABASE_SIZE: LazyLock> = LazyLock::new(|| { diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 0aa5f1d38db..390711079f4 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.5 +TESTS_TAG := v1.5.0-alpha.6 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 9495047e7f9..dacca204c19 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,6 +25,8 @@ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # We no longer implement merge logic. 
+ "tests/.*/bellatrix/fork_choice/on_merge_block", # light_client "tests/.*/.*/light_client/single_merkle_proof", "tests/.*/.*/light_client/sync", @@ -46,11 +48,6 @@ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", - # TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - "tests/.*/electra/ssz_static/LightClientUpdate", - "tests/.*/electra/ssz_static/LightClientFinalityUpdate", - "tests/.*/electra/ssz_static/LightClientBootstrap", - "tests/.*/electra/merkle_proof", ] diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index b68bbdc5d39..49c07197848 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,8 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - BeaconBlockBody, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, FixedVector, - FullPayload, Unsigned, + light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, + BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -22,13 +22,13 @@ pub struct MerkleProof { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] -pub struct MerkleProofValidity { +pub struct BeaconStateMerkleProofValidity { pub metadata: Option, pub state: BeaconState, pub merkle_proof: MerkleProof, } -impl LoadCase for MerkleProofValidity { +impl LoadCase for BeaconStateMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; @@ -49,11 +49,30 @@ impl LoadCase for MerkleProofValidity { } } -impl Case for MerkleProofValidity { +impl Case for BeaconStateMerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: 
ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.update_tree_hash_cache().unwrap(); - let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + + let proof = match self.merkle_proof.leaf_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::CURRENT_SYNC_COMMITTEE_INDEX => { + state.compute_current_sync_committee_proof() + } + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + state.compute_next_sync_committee_proof() + } + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + | light_client_update::FINALIZED_ROOT_INDEX => state.compute_finalized_root_proof(), + _ => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof, invalid index".to_string(), + )); + } + }; + + let Ok(proof) = proof else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), )); @@ -198,3 +217,81 @@ impl Case for KzgInclusionMerkleProofValidity { } } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct BeaconBlockBodyMerkleProofValidity { + pub metadata: Option, + pub block_body: BeaconBlockBody>, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for BeaconBlockBodyMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block_body: BeaconBlockBody> = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => { + return Err(Error::InternalError(format!( + "Beacon block body merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Capella => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } + ForkName::Electra => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? 
+ .into() + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + Ok(Self { + metadata, + block_body, + merkle_proof, + }) + } +} + +impl Case for BeaconBlockBodyMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let binding = self.block_body.clone(); + let block_body = binding.to_ref(); + let Ok(proof) = block_body.block_body_merkle_proof(self.merkle_proof.leaf_index) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merkle proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 24184441047..54ca52447f4 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -270,7 +270,7 @@ impl Operation for SyncAggregate { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base + fork_name.altair_enabled() } fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dacaba1dcab..f4a09de32cb 100644 --- 
a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -627,8 +627,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix. - if self.handler_name == "on_merge_block" && fork_name != ForkName::Bellatrix { + // We no longer run on_merge_block tests since removing merge support. + if self.handler_name == "on_merge_block" { return false; } @@ -921,10 +921,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { #[derive(Derivative)] #[derivative(Default(bound = ""))] -pub struct MerkleProofValidityHandler(PhantomData); +pub struct BeaconStateMerkleProofValidityHandler(PhantomData); -impl Handler for MerkleProofValidityHandler { - type Case = cases::MerkleProofValidity; +impl Handler for BeaconStateMerkleProofValidityHandler { + type Case = cases::BeaconStateMerkleProofValidity; fn config_name() -> &'static str { E::name() @@ -935,15 +935,11 @@ impl Handler for MerkleProofValidityHandler { } fn handler_name(&self) -> String { - "single_merkle_proof".into() + "single_merkle_proof/BeaconState".into() } - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { - // Test is skipped due to some changes in the Capella light client - // spec. 
- // - // https://github.com/sigp/lighthouse/issues/4022 - false + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.altair_enabled() } } @@ -968,7 +964,31 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { // Enabled in Deneb - fork_name == ForkName::Deneb + fork_name.deneb_enabled() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct BeaconBlockBodyMerkleProofValidityHandler(PhantomData); + +impl Handler for BeaconBlockBodyMerkleProofValidityHandler { + type Case = cases::BeaconBlockBodyMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof/BeaconBlockBody".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.capella_enabled() } } @@ -993,8 +1013,7 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - fork_name != ForkName::Base && fork_name != ForkName::Electra + fork_name.altair_enabled() } } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 49de073d6ae..a9322e5dd5e 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -80,6 +80,7 @@ type_name_generic!(ExecutionPayloadHeaderBellatrix, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderElectra, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionRequests); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 
a677736d519..3f802d89447 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -396,11 +396,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only() - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only() - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -476,13 +475,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only( ) .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. 
@@ -506,13 +504,12 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } #[test] @@ -679,6 +676,14 @@ mod ssz_static { SszStaticHandler::::electra_and_later().run(); SszStaticHandler::::electra_and_later().run(); } + + #[test] + fn execution_requests() { + SszStaticHandler::, MainnetEthSpec>::electra_and_later() + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_and_later() + .run(); + } } #[test] @@ -818,12 +823,6 @@ fn fork_choice_on_block() { ForkChoiceHandler::::new("on_block").run(); } -#[test] -fn fork_choice_on_merge_block() { - ForkChoiceHandler::::new("on_merge_block").run(); - ForkChoiceHandler::::new("on_merge_block").run(); -} - #[test] fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); @@ -920,8 +919,13 @@ fn kzg_recover_cells_and_proofs() { } #[test] -fn merkle_proof_validity() { - MerkleProofValidityHandler::::default().run(); +fn beacon_state_merkle_proof_validity() { + BeaconStateMerkleProofValidityHandler::::default().run(); +} + +#[test] +fn beacon_block_body_merkle_proof_validity() { + BeaconBlockBodyMerkleProofValidityHandler::::default().run(); } #[test] diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index e1cef95cd32..5c9baa2349b 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -206,7 +206,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { node.server.all_payloads_valid(); }); - let duration_to_genesis = network.duration_to_genesis().await; + let duration_to_genesis = network.duration_to_genesis().await?; 
println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 3859257fb75..0690ab242c5 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -194,7 +194,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { ); } - let duration_to_genesis = network.duration_to_genesis().await; + let duration_to_genesis = network.duration_to_genesis().await?; println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 7b9327a7aaa..59efc09baa6 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -459,7 +459,7 @@ impl LocalNetwork { .map(|body| body.unwrap().data.finalized.epoch) } - pub async fn duration_to_genesis(&self) -> Duration { + pub async fn duration_to_genesis(&self) -> Result { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -471,6 +471,9 @@ impl LocalNetwork { .data .genesis_time, ); - genesis_time - now + genesis_time.checked_sub(now).ok_or( + "The genesis time has already passed since all nodes started. 
The node startup time \ + may have regressed, and the current `GENESIS_DELAY` is no longer sufficient.", + ) } } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index fb2d2c92089..959b7f52e9d 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -53,7 +53,7 @@ ethereum_serde_utils = { workspace = true } libsecp256k1 = { workspace = true } ring = { workspace = true } rand = { workspace = true, features = ["small_rng"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 1e4d901651d..3d209b53585 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -436,7 +436,7 @@ impl BeaconNodeFallback { for candidate in candidates.iter() { let health = candidate.health().await; - match candidate.health().await { + match health { Ok(health) => { if self .distance_tiers @@ -467,7 +467,9 @@ impl BeaconNodeFallback { /// low quality responses. To route around this it's best to poll all connected beacon nodes. /// A previous implementation of this function polled only the unavailable BNs. pub async fn update_all_candidates(&self) { - let candidates = self.candidates.read().await; + // Clone the vec, so we release the read lock immediately. + // `candidate.health` is behind an Arc, so this would still allow us to mutate the values. 
+ let candidates = self.candidates.read().await.clone(); let mut futures = Vec::with_capacity(candidates.len()); let mut nodes = Vec::with_capacity(candidates.len()); @@ -510,8 +512,9 @@ impl BeaconNodeFallback { pub async fn measure_latency(&self) -> Vec { let candidates = self.candidates.read().await; let futures: Vec<_> = candidates - .iter() - .map(|candidate| async { + .clone() + .into_iter() + .map(|candidate| async move { let beacon_node_id = candidate.beacon_node.to_string(); // The `node/version` endpoint is used since I imagine it would // require the least processing in the BN and therefore measure @@ -528,6 +531,7 @@ impl BeaconNodeFallback { (beacon_node_id, response_instant) }) .collect(); + drop(candidates); let request_instant = Instant::now(); @@ -553,12 +557,25 @@ impl BeaconNodeFallback { R: Future>, Err: Debug, { + let mut errors = vec![]; + // First pass: try `func` on all candidates. Candidate order has already been set in // `update_all_candidates`. This ensures the most suitable node is always tried first. let candidates = self.candidates.read().await; - let mut errors = vec![]; + let mut futures = vec![]; + + // Run `func` using a `candidate`, returning the value or capturing errors. for candidate in candidates.iter() { - match Self::run_on_candidate(candidate, &func, &self.log).await { + futures.push(Self::run_on_candidate( + candidate.beacon_node.clone(), + &func, + &self.log, + )); + } + drop(candidates); + + for future in futures { + match future.await { Ok(val) => return Ok(val), Err(e) => errors.push(e), } @@ -566,8 +583,21 @@ impl BeaconNodeFallback { // Second pass. No candidates returned successfully. Try again with the same order. // This will duplicate errors. + let candidates = self.candidates.read().await; + let mut futures = vec![]; + + // Run `func` using a `candidate`, returning the value or capturing errors. 
for candidate in candidates.iter() { - match Self::run_on_candidate(candidate, &func, &self.log).await { + futures.push(Self::run_on_candidate( + candidate.beacon_node.clone(), + &func, + &self.log, + )); + } + drop(candidates); + + for future in futures { + match future.await { Ok(val) => return Ok(val), Err(e) => errors.push(e), } @@ -579,7 +609,7 @@ impl BeaconNodeFallback { /// Run the future `func` on `candidate` while reporting metrics. async fn run_on_candidate( - candidate: &CandidateBeaconNode, + candidate: BeaconNodeHttpClient, func: F, log: &Logger, ) -> Result)> @@ -588,21 +618,21 @@ impl BeaconNodeFallback { R: Future>, Err: Debug, { - inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.beacon_node.as_ref()]); + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.as_ref()]); // There exists a race condition where `func` may be called when the candidate is // actually not ready. We deem this an acceptable inefficiency. - match func(candidate.beacon_node.clone()).await { + match func(candidate.clone()).await { Ok(val) => Ok(val), Err(e) => { debug!( log, "Request to beacon node failed"; - "node" => %candidate.beacon_node, + "node" => %candidate, "error" => ?e, ); - inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.beacon_node.as_ref()]); - Err((candidate.beacon_node.to_string(), Error::RequestFailed(e))) + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.as_ref()]); + Err((candidate.to_string(), Error::RequestFailed(e))) } } } @@ -626,8 +656,14 @@ impl BeaconNodeFallback { // Run `func` using a `candidate`, returning the value or capturing errors. 
for candidate in candidates.iter() { - futures.push(Self::run_on_candidate(candidate, &func, &self.log)); + futures.push(Self::run_on_candidate( + candidate.beacon_node.clone(), + &func, + &self.log, + )); } + drop(candidates); + let results = future::join_all(futures).await; let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect(); @@ -701,21 +737,21 @@ impl ApiTopic { #[cfg(test)] mod tests { + use crate::beacon_node_health::BeaconNodeHealthTier; + use crate::SensitiveUrl; + use eth2::Timeouts; use logging::test_logger; use std::str::FromStr; use strum::VariantNames; use types::EmptyBlock; - use eth2::Timeouts; use slot_clock::TestingSlotClock; use types::{BeaconBlockDeneb, BlindedBeaconBlock, MainnetEthSpec, Slot}; use super::*; - use crate::beacon_node_health::BeaconNodeHealthTier; use crate::block_service::{BlockService, BlockServiceBuilder, UnsignedBlock}; use crate::testing::mock_beacon_node::MockBeaconNode; use crate::testing::validator_test_rig::ValidatorTestRig; - use crate::SensitiveUrl; type E = MainnetEthSpec; diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 784a0d66abb..4864ca98fd3 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -546,7 +546,7 @@ impl BlockService { &[metrics::BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blocks(signed_block) + .post_beacon_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? } @@ -556,7 +556,7 @@ impl BlockService { &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blinded_blocks(signed_block) + .post_beacon_blinded_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? 
} diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index b027ad0df6d..209876f07b0 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -39,20 +39,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - // TODO remove this flag in a future release - .arg( - Arg::new("disable-run-on-all") - .long("disable-run-on-all") - .value_name("DISABLE_RUN_ON_ALL") - .help("DEPRECATED. Use --broadcast. \ - By default, Lighthouse publishes attestation, sync committee subscriptions \ - and proposer preparation messages to all beacon nodes provided in the \ - `--beacon-nodes flag`. This option changes that behaviour such that these \ - api calls only go out to the first available and synced beacon node") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("broadcast") .long("broadcast") @@ -167,14 +153,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("produce-block-v3") - .long("produce-block-v3") - .help("This flag is deprecated and is no longer in use.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("distributed") .long("distributed") @@ -403,15 +381,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("latency-measurement-service") - .long("latency-measurement-service") - .help("DEPRECATED") - .action(ArgAction::Set) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) .arg( Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index c2c445c48c3..f42ed551463 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -244,14 +244,6 @@ impl Config { config.distributed = true; } - if cli_args.get_flag("disable-run-on-all") { - warn!( - log, - "The --disable-run-on-all 
flag is deprecated"; - "msg" => "please use --broadcast instead" - ); - config.broadcast_topics = vec![]; - } if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { config.broadcast_topics = broadcast_topics .split(',') @@ -397,14 +389,6 @@ impl Config { config.prefer_builder_proposals = true; } - if cli_args.get_flag("produce-block-v3") { - warn!( - log, - "produce-block-v3 flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.gas_limit = cli_args .get_one::("gas-limit") .map(|gas_limit| { @@ -429,17 +413,6 @@ impl Config { config.enable_latency_measurement_service = !cli_args.get_flag("disable-latency-measurement-service"); - if cli_args - .get_one::("latency-measurement-service") - .is_some() - { - warn!( - log, - "latency-measurement-service flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; if config.validator_registration_batch_size == 0 { diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index 074c5783475..e5477ff8df7 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -75,12 +75,6 @@ pub fn import( ))); } - info!( - log, - "Importing keystores via standard HTTP API"; - "count" => request.keystores.len(), - ); - // Import slashing protection data before keystores, so that new keystores don't start signing // without it. Do not return early on failure, propagate the failure to each key. 
let slashing_protection_status = @@ -156,6 +150,19 @@ pub fn import( statuses.push(status); } + let successful_import = statuses + .iter() + .filter(|status| matches!(status.status, ImportKeystoreStatus::Imported)) + .count(); + + if successful_import > 0 { + info!( + log, + "Imported keystores via standard HTTP API"; + "count" => successful_import, + ); + } + Ok(ImportKeystoresResponse { data: statuses }) } @@ -238,7 +245,23 @@ pub fn delete( task_executor: TaskExecutor, log: Logger, ) -> Result { - let export_response = export(request, validator_store, task_executor, log)?; + let export_response = export(request, validator_store, task_executor, log.clone())?; + + // Check the status is Deleted to confirm deletion is successful, then only display the log + let successful_deletion = export_response + .data + .iter() + .filter(|response| matches!(response.status.status, DeleteKeystoreStatus::Deleted)) + .count(); + + if successful_deletion > 0 { + info!( + log, + "Deleted keystore via standard HTTP API"; + "count" => successful_deletion, + ); + } + Ok(DeleteKeystoresResponse { data: export_response .data diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 8bc569c67a2..57e1080fd9b 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -38,7 +38,7 @@ pub const SUBSCRIPTIONS: &str = "subscriptions"; pub const LOCAL_KEYSTORE: &str = "local_keystore"; pub const WEB3SIGNER: &str = "web3signer"; -pub use lighthouse_metrics::*; +pub use metrics::*; pub static GENESIS_DISTANCE: LazyLock> = LazyLock::new(|| { try_create_int_gauge( @@ -316,9 +316,7 @@ pub fn gather_prometheus_metrics( warp_utils::metrics::scrape_health_metrics(); - encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) - .unwrap(); + encoder.encode(&metrics::gather(), &mut buffer).unwrap(); String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff 
--git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index c94115e5ec5..0ef9a6a13d0 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -16,8 +16,8 @@ use account_utils::{ ZeroizeString, }; use eth2_keystore::Keystore; -use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; +use metrics::set_gauge; use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; use slog::{debug, error, info, warn, Logger}; diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 3033f65aeda..26d2a9f65fb 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -27,7 +27,7 @@ pub use beacon_node_health::BeaconNodeSyncDistanceTiers; pub use cli::cli_app; pub use config::Config; use initialized_validators::InitializedValidators; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 00d7b14de7c..cda13a5e63c 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,6 +1,6 @@ use crate::http_metrics; use crate::{DutiesService, ProductionValidatorClient}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use slog::{debug, error, info, Logger}; use slot_clock::SlotClock; use tokio::time::{sleep, Duration}; diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 6753c50cff5..af59ad98924 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -19,8 +19,8 @@ use task_executor::TaskExecutor; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, 
AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, - Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, PublicKeyBytes, SelectionProof, Signature, + SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, @@ -353,17 +353,9 @@ impl ValidatorStore { fn signing_context(&self, domain: Domain, signing_epoch: Epoch) -> SigningContext { if domain == Domain::VoluntaryExit { - match self.spec.fork_name_at_epoch(signing_epoch) { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - SigningContext { - domain, - epoch: signing_epoch, - fork: self.fork(signing_epoch), - genesis_validators_root: self.genesis_validators_root, - } - } + if self.spec.fork_name_at_epoch(signing_epoch).deneb_enabled() { // EIP-7044 - ForkName::Deneb | ForkName::Electra => SigningContext { + SigningContext { domain, epoch: signing_epoch, fork: Fork { @@ -372,7 +364,14 @@ impl ValidatorStore { epoch: signing_epoch, }, genesis_validators_root: self.genesis_validators_root, - }, + } + } else { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } } } else { SigningContext { diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index ebcde6a8288..92267ad8755 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -20,6 +20,7 @@ tree_hash = { workspace = true } eth2 = { workspace = true } hex = { workspace = true } tokio = { workspace = true } 
+derivative = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d06fce1d094..d4403b46131 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -45,15 +45,6 @@ pub fn cli_app() -> Command { Another, optional JSON file is created which contains a list of validator \ deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(OUTPUT_PATH_FLAG) .long(OUTPUT_PATH_FLAG) @@ -112,7 +103,9 @@ pub fn cli_app() -> Command { "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. \ Using this flag will save several seconds per validator if the \ - user has an alternate strategy for submitting deposits.", + user has an alternate strategy for submitting deposits. 
\ + If used, the --force-bls-withdrawal-credentials is also required \ + to ensure users are aware that an --eth1-withdrawal-address is not set.", ) .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs new file mode 100644 index 00000000000..6283279986a --- /dev/null +++ b/validator_manager/src/delete_validators.rs @@ -0,0 +1,293 @@ +use clap::{Arg, ArgAction, ArgMatches, Command}; +use eth2::{ + lighthouse_vc::types::{DeleteKeystoreStatus, DeleteKeystoresRequest}, + SensitiveUrl, +}; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use types::PublicKeyBytes; + +use crate::{common::vc_http_client, DumpConfig}; + +pub const CMD: &str = "delete"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const VALIDATOR_FLAG: &str = "validators"; + +#[derive(Debug)] +pub enum DeleteError { + InvalidPublicKey, + DeleteFailed(eth2::Error), +} + +pub fn cli_app() -> Command { + Command::new(CMD) + .about("Deletes one or more validators from a validator client using the HTTP API.") + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VALIDATOR_FLAG) + .long(VALIDATOR_FLAG) + .value_name("STRING") + .help("Comma-separated list of validators (pubkey) that will be deleted.") + .action(ArgAction::Set) + .required(true) + .display_order(0), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct DeleteConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: 
PathBuf, + pub validators_to_delete: Vec, +} + +impl DeleteConfig { + fn from_cli(matches: &ArgMatches) -> Result { + let validators_to_delete_str = + clap_utils::parse_required::(matches, VALIDATOR_FLAG)?; + + let validators_to_delete = validators_to_delete_str + .split(',') + .map(|s| s.trim().parse()) + .collect::, _>>()?; + + Ok(Self { + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + validators_to_delete, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + }) + } +} + +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { + let config = DeleteConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await + } +} + +async fn run<'a>(config: DeleteConfig) -> Result<(), String> { + let DeleteConfig { + vc_url, + vc_token_path, + validators_to_delete, + } = config; + + let (http_client, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + for validator_to_delete in &validators_to_delete { + if !validators + .iter() + .any(|validator| &validator.validating_pubkey == validator_to_delete) + { + return Err(format!("Validator {} doesn't exist", validator_to_delete)); + } + } + + let delete_request = DeleteKeystoresRequest { + pubkeys: validators_to_delete.clone(), + }; + + let responses = http_client + .delete_keystores(&delete_request) + .await + .map_err(|e| format!("Error deleting keystore {}", e))? 
+ .data; + + let mut error = false; + for (validator_to_delete, response) in validators_to_delete.iter().zip(responses.iter()) { + if response.status == DeleteKeystoreStatus::Error + || response.status == DeleteKeystoreStatus::NotFound + || response.status == DeleteKeystoreStatus::NotActive + { + error = true; + eprintln!( + "Problem with removing validator {:?}, status: {:?}", + validator_to_delete, response.status + ); + } + } + if error { + return Err("Problem with removing one or more validators".to_string()); + } + + eprintln!("Validator(s) deleted"); + Ok(()) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use std::{ + fs::{self, File}, + io::Write, + str::FromStr, + }; + + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + struct TestBuilder { + delete_config: Option, + src_import_builder: Option, + http_config: HttpConfig, + vc_token: Option, + validators: Vec, + } + + impl TestBuilder { + async fn new() -> Self { + Self { + delete_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + } + } + + async fn with_validators( + mut self, + count: u32, + first_index: u32, + indices_of_validators_to_delete: Vec, + ) -> Self { + let builder = ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec = { + let contents = + fs::read_to_string(builder.get_import_config().validators_file_path.unwrap()) + .unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + let import_config = builder.get_import_config(); + + let validators_to_delete = indices_of_validators_to_delete + .iter() + .map(|&index| { + 
PublicKeyBytes::from_str( + format!("0x{}", local_validators[index].voting_keystore.pubkey()).as_str(), + ) + .unwrap() + }) + .collect(); + + self.delete_config = Some(DeleteConfig { + vc_url: import_config.vc_url, + vc_token_path: import_config.vc_token_path, + validators_to_delete, + }); + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self + } + + pub async fn run_test(self) -> TestResult { + let import_builder = self.src_import_builder.unwrap(); + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + + let path = self.delete_config.clone().unwrap().vc_token_path; + let url = self.delete_config.clone().unwrap().vc_url; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path.clone()) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + let result = run(self.delete_config.clone().unwrap()).await; + + if result.is_ok() { + let (_, list_keystores_response) = vc_http_client(url, path.clone()).await.unwrap(); + + // The remaining number of active keystores (left) = Total validators - Deleted validators (right) + assert_eq!( + list_keystores_response.len(), + self.validators.len() + - self + .delete_config + .clone() + .unwrap() + .validators_to_delete + .len() + ); + + // Check the remaining validator keys are not in validators_to_delete + assert!(list_keystores_response.iter().all(|keystore| { + !self + .delete_config + .clone() + .unwrap() + .validators_to_delete + .contains(&keystore.validating_pubkey) + })); + + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] + struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + 
#[tokio::test] + async fn delete_multiple_validators() { + TestBuilder::new() + .await + .with_validators(3, 0, vec![0, 1, 2]) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index f193e8d0fbd..6065ecb6035 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,16 +1,28 @@ use super::common::*; use crate::DumpConfig; +use account_utils::{eth2_keystore::Keystore, ZeroizeString}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; +use derivative::Derivative; +use eth2::lighthouse_vc::types::KeystoreJsonStr; use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use types::Address; pub const CMD: &str = "import"; pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; +pub const KEYSTORE_FILE_FLAG: &str = "keystore-file"; pub const VC_URL_FLAG: &str = "vc-url"; pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const PASSWORD: &str = "password"; +pub const FEE_RECIPIENT: &str = "suggested-fee-recipient"; +pub const GAS_LIMIT: &str = "gas-limit"; +pub const BUILDER_PROPOSALS: &str = "builder-proposals"; +pub const BUILDER_BOOST_FACTOR: &str = "builder-boost-factor"; +pub const PREFER_BUILDER_PROPOSALS: &str = "prefer-builder-proposals"; +pub const ENABLED: &str = "enabled"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; @@ -21,15 +33,6 @@ pub fn cli_app() -> Command { are defined in a JSON file which can be generated using the \"create-validators\" \ command.", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(VALIDATORS_FILE_FLAG) .long(VALIDATORS_FILE_FLAG) @@ -39,19 +42,32 @@ pub fn cli_app() -> Command { imported 
to the validator client. This file is usually named \ \"validators.json\".", ) - .required(true) .action(ArgAction::Set) - .display_order(0), + .display_order(0) + .required_unless_present("keystore-file") + .conflicts_with("keystore-file"), + ) + .arg( + Arg::new(KEYSTORE_FILE_FLAG) + .long(KEYSTORE_FILE_FLAG) + .value_name("PATH_TO_KEYSTORE_FILE") + .help( + "The path to a keystore JSON file to be \ + imported to the validator client. This file is usually created \ + using staking-deposit-cli or ethstaker-deposit-cli", + ) + .action(ArgAction::Set) + .display_order(0) + .conflicts_with("validators-file") + .required_unless_present("validators-file") + .requires(PASSWORD), ) .arg( Arg::new(VC_URL_FLAG) .long(VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( - "A HTTP(S) address of a validator client using the keymanager-API. \ - If this value is not supplied then a 'dry run' will be conducted where \ - no changes are made to the validator client.", - ) + "A HTTP(S) address of a validator client using the keymanager-API.") .default_value("http://localhost:5062") .requires(VC_TOKEN_FLAG) .action(ArgAction::Set) @@ -80,29 +96,111 @@ pub fn cli_app() -> Command { ) .display_order(0), ) + .arg( + Arg::new(PASSWORD) + .long(PASSWORD) + .value_name("STRING") + .help("Password of the keystore file.") + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(FEE_RECIPIENT) + .long(FEE_RECIPIENT) + .value_name("ETH1_ADDRESS") + .help("When provided, the imported validator will use the suggested fee recipient. Omit this flag to use the default value from the VC.") + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(GAS_LIMIT) + .long(GAS_LIMIT) + .value_name("UINT64") + .help("When provided, the imported validator will use this gas limit. 
It is recommended \ + to leave this as the default value by not specifying this flag.",) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(BUILDER_PROPOSALS) + .long(BUILDER_PROPOSALS) + .help("When provided, the imported validator will attempt to create \ + blocks via builder rather than the local EL.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(BUILDER_BOOST_FACTOR) + .long(BUILDER_BOOST_FACTOR) + .value_name("UINT64") + .help("When provided, the imported validator will use this \ + percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.",) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(PREFER_BUILDER_PROPOSALS) + .long(PREFER_BUILDER_PROPOSALS) + .help("When provided, the imported validator will always prefer blocks \ + constructed by builders, regardless of payload value.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) } -#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Serialize, Deserialize, Derivative)] +#[derivative(Debug)] pub struct ImportConfig { - pub validators_file_path: PathBuf, + pub validators_file_path: Option, + pub keystore_file_path: Option, pub vc_url: SensitiveUrl, pub vc_token_path: PathBuf, pub ignore_duplicates: bool, + #[derivative(Debug = "ignore")] + pub password: Option, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub builder_proposals: Option, + pub builder_boost_factor: Option, + pub prefer_builder_proposals: Option, + pub enabled: Option, } impl ImportConfig { fn from_cli(matches: &ArgMatches) -> Result { Ok(Self { - validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, + validators_file_path: clap_utils::parse_optional(matches, VALIDATORS_FILE_FLAG)?, + keystore_file_path: clap_utils::parse_optional(matches, KEYSTORE_FILE_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), + password: clap_utils::parse_optional(matches, PASSWORD)?, + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT)?, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS)?, + builder_boost_factor: clap_utils::parse_optional(matches, BUILDER_BOOST_FACTOR)?, + prefer_builder_proposals: clap_utils::parse_optional( + matches, + PREFER_BUILDER_PROPOSALS, + )?, + enabled: clap_utils::parse_optional(matches, ENABLED)?, }) } } pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = ImportConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? 
{ Ok(()) } else { @@ -113,27 +211,61 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() async fn run<'a>(config: ImportConfig) -> Result<(), String> { let ImportConfig { validators_file_path, + keystore_file_path, vc_url, vc_token_path, ignore_duplicates, + password, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, } = config; - if !validators_file_path.exists() { - return Err(format!("Unable to find file at {:?}", validators_file_path)); - } + let validators: Vec = + if let Some(validators_format_path) = &validators_file_path { + if !validators_format_path.exists() { + return Err(format!( + "Unable to find file at {:?}", + validators_format_path + )); + } - let validators_file = fs::OpenOptions::new() - .read(true) - .create(false) - .open(&validators_file_path) - .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e))?; - let validators: Vec = serde_json::from_reader(&validators_file) - .map_err(|e| { - format!( - "Unable to parse JSON in {:?}: {:?}", - validators_file_path, e - ) - })?; + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(validators_format_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_format_path, e))?; + + serde_json::from_reader(&validators_file).map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_format_path, e + ) + })? 
+ } else if let Some(keystore_format_path) = &keystore_file_path { + vec![ValidatorSpecification { + voting_keystore: KeystoreJsonStr( + Keystore::from_json_file(keystore_format_path).map_err(|e| format!("{e:?}"))?, + ), + voting_keystore_password: password.ok_or_else(|| { + "The --password flag is required to supply the keystore password".to_string() + })?, + slashing_protection: None, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, + }] + } else { + return Err(format!( + "One of the flag --{VALIDATORS_FILE_FLAG} or --{KEYSTORE_FILE_FLAG} is required." + )); + }; let count = validators.len(); @@ -250,7 +382,10 @@ async fn run<'a>(config: ImportConfig) -> Result<(), String> { pub mod tests { use super::*; use crate::create_validators::tests::TestBuilder as CreateTestBuilder; - use std::fs; + use std::{ + fs::{self, File}, + str::FromStr, + }; use tempfile::{tempdir, TempDir}; use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; @@ -279,10 +414,18 @@ pub mod tests { Self { import_config: ImportConfig { // This field will be overwritten later on. 
- validators_file_path: dir.path().into(), + validators_file_path: Some(dir.path().into()), + keystore_file_path: Some(dir.path().into()), vc_url: vc.url.clone(), vc_token_path, ignore_duplicates: false, + password: Some(ZeroizeString::from_str("password").unwrap()), + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }, vc, create_dir: None, @@ -295,6 +438,10 @@ pub mod tests { self } + pub fn get_import_config(&self) -> ImportConfig { + self.import_config.clone() + } + pub async fn create_validators(mut self, count: u32, first_index: u32) -> Self { let create_result = CreateTestBuilder::default() .mutate_config(|config| { @@ -307,7 +454,55 @@ create_result.result.is_ok(), "precondition: validators are created" ); - self.import_config.validators_file_path = create_result.validators_file_path(); + self.import_config.validators_file_path = Some(create_result.validators_file_path()); + self.create_dir = Some(create_result.output_dir); + self + } + + // Keystore JSON requires a different format when creating validators + pub async fn create_validators_keystore_format( + mut self, + count: u32, + first_index: u32, + ) -> Self { + let create_result = CreateTestBuilder::default() + .mutate_config(|config| { + config.count = count; + config.first_index = first_index; + }) + .run_test() + .await; + assert!( + create_result.result.is_ok(), + "precondition: validators are created" + ); + + let validators_file_path = create_result.validators_file_path(); + + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(&validators_file_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e)) + .unwrap(); + + let validators: Vec = serde_json::from_reader(&validators_file) + .map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_file_path, e + ) + }) + .unwrap(); + + let validator = 
&validators[0]; + let validator_json = validator.voting_keystore.0.clone(); + + let keystore_file = File::create(&validators_file_path).unwrap(); + let _ = validator_json.to_json_writer(keystore_file); + + self.import_config.keystore_file_path = Some(create_result.validators_file_path()); + self.import_config.password = Some(validator.voting_keystore_password.clone()); self.create_dir = Some(create_result.output_dir); self } @@ -327,7 +522,8 @@ pub mod tests { let local_validators: Vec = { let contents = - fs::read_to_string(&self.import_config.validators_file_path).unwrap(); + fs::read_to_string(&self.import_config.validators_file_path.unwrap()) + .unwrap(); serde_json::from_str(&contents).unwrap() }; let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; @@ -355,6 +551,39 @@ pub mod tests { vc: self.vc, } } + + pub async fn run_test_keystore_format(self) -> TestResult { + let result = run(self.import_config.clone()).await; + + if result.is_ok() { + self.vc.ensure_key_cache_consistency().await; + + let local_keystore: Keystore = + Keystore::from_json_file(&self.import_config.keystore_file_path.unwrap()) + .unwrap(); + + let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; + + assert_eq!( + 1, + list_keystores_response.len(), + "vc should have exactly the number of validators imported" + ); + + let local_pubkey = local_keystore.public_key().unwrap().into(); + let remote_validator = list_keystores_response + .iter() + .find(|validator| validator.validating_pubkey == local_pubkey) + .expect("validator must exist on VC"); + assert_eq!(&remote_validator.derivation_path, &local_keystore.path()); + assert_eq!(remote_validator.readonly, Some(false)); + } + + TestResult { + result, + vc: self.vc, + } + } } #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
@@ -445,4 +674,66 @@ pub mod tests { .await .assert_ok(); } + + #[tokio::test] + async fn create_one_validator_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + // Set validators_file_path to None so that keystore_file_path is used for tests with the keystore format + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .run_test_keystore_format() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_one_validator_with_offset_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 42) + .await + .run_test_keystore_format() + .await + .assert_ok(); + } + + #[tokio::test] + async fn import_duplicates_when_disallowed_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .import_validators_without_checks() + .await + .run_test_keystore_format() + .await + .assert_err_contains("DuplicateValidator"); + } + + #[tokio::test] + async fn import_duplicates_when_allowed_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.ignore_duplicates = true; + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .import_validators_without_checks() + .await + .run_test_keystore_format() + .await + .assert_ok(); + } } diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 222dd7076de..8e43cd59772 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -8,7 +8,9 @@ use types::EthSpec; pub mod common; pub mod create_validators; +pub mod delete_validators; pub mod import_validators; +pub mod list_validators; pub mod move_validators; pub const CMD: &str = "validator_manager"; @@ -51,11 +53,14 @@ pub fn cli_app() -> Command { .help("Prints 
help information") .action(ArgAction::HelpLong) .display_order(0) - .help_heading(FLAG_HEADER), + .help_heading(FLAG_HEADER) + .global(true), ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) + .subcommand(list_validators::cli_app()) + .subcommand(delete_validators::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. @@ -83,6 +88,13 @@ pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), Some((move_validators::CMD, matches)) => { move_validators::cli_run(matches, dump_config).await } + Some((list_validators::CMD, matches)) => { + list_validators::cli_run(matches, dump_config).await + } + Some((delete_validators::CMD, matches)) => { + delete_validators::cli_run(matches, dump_config).await + } + Some(("", _)) => Err("No command supplied. See --help.".to_string()), Some((unknown, _)) => Err(format!( "{} is not a valid {} command. See --help.", unknown, CMD diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs new file mode 100644 index 00000000000..7df85a7eb9d --- /dev/null +++ b/validator_manager/src/list_validators.rs @@ -0,0 +1,201 @@ +use clap::{Arg, ArgAction, ArgMatches, Command}; +use eth2::lighthouse_vc::types::SingleKeystoreResponse; +use eth2::SensitiveUrl; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +use crate::{common::vc_http_client, DumpConfig}; + +pub const CMD: &str = "list"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; + +pub fn cli_app() -> Command { + Command::new(CMD) + .about("Lists all validators in a validator client using the HTTP API.") + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) 
+ .arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct ListConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, +} + +impl ListConfig { + fn from_cli(matches: &ArgMatches) -> Result<Self, String> { + Ok(Self { + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + }) + } +} + +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { + let config = ListConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await?; + Ok(()) + } +} + +async fn run<'a>(config: ListConfig) -> Result<Vec<SingleKeystoreResponse>, String> { + let ListConfig { + vc_url, + vc_token_path, + } = config; + + let (_, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + println!("List of validators ({}):", validators.len()); + + for validator in &validators { + println!("{}", validator.validating_pubkey); + } + + Ok(validators) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use std::{ + fs::{self, File}, + io::Write, + }; + + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + struct TestBuilder { + list_config: Option<ListConfig>, + src_import_builder: Option<ImportTestBuilder>, + http_config: HttpConfig, + vc_token: Option<String>, + validators: Vec<ValidatorSpecification>, + } + + impl TestBuilder { + async fn new() -> Self { + Self { + list_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + } + } + + async fn with_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = 
ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.list_config = Some(ListConfig { + vc_url: builder.get_import_config().vc_url, + vc_token_path: builder.get_import_config().vc_token_path, + }); + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec<ValidatorSpecification> = { + let contents = + fs::read_to_string(builder.get_import_config().validators_file_path.unwrap()) + .unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self + } + + pub async fn run_test(self) -> TestResult { + let import_test_result = self.src_import_builder.unwrap().run_test().await; + assert!(import_test_result.result.is_ok()); + + let path = self.list_config.clone().unwrap().vc_token_path; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + let result = run(self.list_config.clone().unwrap()).await; + + if result.is_ok() { + let result_ref = result.as_ref().unwrap(); + + for local_validator in &self.validators { + let local_keystore = &local_validator.voting_keystore.0; + let local_pubkey = local_keystore.public_key().unwrap(); + assert!( + result_ref + .iter() + .any(|validator| validator.validating_pubkey + == local_pubkey.clone().into()), + "local validator pubkey not found in result" + ); + } + + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
+ struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + #[tokio::test] + async fn list_all_validators() { + TestBuilder::new() + .await + .with_validators(3, 0) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 91bc2b0ef85..7651917ea94 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -2,7 +2,6 @@ use super::common::*; use crate::DumpConfig; use account_utils::{read_password_from_user, ZeroizeString}; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::{ std_types::{ @@ -75,15 +74,6 @@ pub fn cli_app() -> Command { command. This command only supports validators signing via a keystore on the local \ file system (i.e., not Web3Signer validators).", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(SRC_VC_URL_FLAG) .long(SRC_VC_URL_FLAG)