From ad7dbe63da1b7142cea7bbd17e2807fddf5c4c7d Mon Sep 17 00:00:00 2001
From: drewstone
Date: Thu, 20 Feb 2025 14:14:00 -0700
Subject: [PATCH] feat: new networking (#664)

Co-authored-by: Serial <69764315+Serial-ATA@users.noreply.github.com>
Co-authored-by: Shady Khalifa
fixes (#667)
---
 .cursor/rules/p2p.mdc                         |  46 +
 .gitignore                                    |   2 +
 Cargo.lock                                    | 860 +++++++++++------
 Cargo.toml                                    |  20 +-
 blueprints/examples/src/lib.rs                |   1 -
 blueprints/examples/src/main.rs               |   3 +-
 blueprints/examples/src/raw_tangle_events.rs  |  64 --
 crates/clients/Cargo.toml                     |   8 +-
 crates/clients/networking/Cargo.toml          |  34 -
 crates/clients/networking/src/error.rs        |  22 -
 crates/clients/networking/src/lib.rs          |   4 -
 crates/clients/networking/src/p2p.rs          | 117 ---
 crates/clients/tangle/src/client.rs           |  13 +-
 crates/config/src/context_config.rs           |  29 +
 crates/config/src/lib.rs                      |  76 +-
 crates/contexts/Cargo.toml                    |  16 +-
 crates/contexts/src/lib.rs                    |   2 -
 crates/contexts/src/p2p.rs                    |  14 -
 crates/crypto/k256/src/lib.rs                 |  67 +-
 crates/crypto/sp-core/src/lib.rs              |  20 +-
 .../blueprint-proc-macro/src/job/mod.rs       |  11 +-
 .../macros/blueprint-proc-macro/src/shared.rs |   2 +-
 crates/macros/context-derive/src/lib.rs       |  18 -
 crates/macros/context-derive/src/p2p.rs       |  41 -
 .../macros/context-derive/tests/ui/basic.rs   |  43 +-
 crates/networking/Cargo.toml                  |  16 +-
 crates/networking/README.md                   | 178 ++++
 .../extensions/round-based/Cargo.toml         |  58 ++
 .../extensions/round-based/src/lib.rs         | 254 ++++++
 .../extensions/round-based/src/tests.rs       |  61 ++
 .../round-based/tests/common/mod.rs           | 242 +++++
 .../round-based/tests/rand_protocol.rs        | 299 ++++++
 crates/networking/src/behaviours.rs           | 125 +++
 .../src/blueprint_protocol/behaviour.rs       | 499 ++++++++++
 .../src/blueprint_protocol/handler.rs         | 268 ++++++
 .../networking/src/blueprint_protocol/mod.rs  | 114 +++
 crates/networking/src/discovery/behaviour.rs  | 394 ++++++++
 crates/networking/src/discovery/config.rs     | 188 ++++
 crates/networking/src/discovery/mod.rs        |  32 +
 crates/networking/src/discovery/peers.rs      | 286 ++++++
 crates/networking/src/error.rs                |  24 +
 crates/networking/src/gossip.rs               | 430 ---------
 .../src/handlers/blueprint_protocol.rs}       |   0
 crates/networking/src/handlers/connections.rs | 140 ---
 crates/networking/src/handlers/dcutr.rs       |   8 -
 crates/networking/src/handlers/discovery.rs   |   1 +
 crates/networking/src/handlers/gossip.rs      | 117 ---
 crates/networking/src/handlers/identify.rs    |  41 -
 crates/networking/src/handlers/kadmelia.rs    |   9 -
 crates/networking/src/handlers/mdns.rs        |  32 -
 crates/networking/src/handlers/mod.rs         |  11 +-
 crates/networking/src/handlers/p2p.rs         | 205 -----
 crates/networking/src/handlers/ping.rs        |   7 -
 crates/networking/src/handlers/relay.rs       |   8 -
 crates/networking/src/lib.rs                  |  39 +-
 crates/networking/src/messaging.rs            | 468 ----------
 crates/networking/src/networking.rs           | 603 ------------
 crates/networking/src/networking/tests.rs     | 769 ----------------
 crates/networking/src/round_based_compat.rs   | 229 -----
 crates/networking/src/service.rs              | 560 ++++++++++++
 crates/networking/src/service_handle.rs       | 185 ++++
 crates/networking/src/setup.rs                | 334 -------
 crates/networking/src/test_helpers.rs         |   1 +
 .../src/tests/blueprint_protocol.rs           | 564 ++++++++++++
 crates/networking/src/tests/discovery.rs      | 140 +++
 crates/networking/src/tests/gossip.rs         | 199 ++++
 crates/networking/src/tests/handshake.rs      | 133 +++
 crates/networking/src/tests/mod.rs            | 350 +++++++
 crates/networking/src/types.rs                | 114 +++
 crates/sdk/Cargo.toml                         |  39 +-
 crates/sdk/src/lib.rs                         |   2 +
 crates/testing-utils/tangle/src/harness.rs    |   2 +-
 .../tangle/src/node/transactions.rs           |   3 +-
 flake.lock                                    |  18 +-
 flake.nix                                     |  28 +-
 75 files changed, 6100 insertions(+), 4260 deletions(-)
 create mode 100644 .cursor/rules/p2p.mdc
 delete mode 100644 blueprints/examples/src/raw_tangle_events.rs
 delete mode 100644 crates/clients/networking/Cargo.toml
 delete mode 100644 crates/clients/networking/src/error.rs
 delete mode 100644 crates/clients/networking/src/lib.rs
 delete mode 100644 crates/clients/networking/src/p2p.rs
 delete mode 100644 crates/macros/context-derive/src/p2p.rs
 create mode 100644 crates/networking/README.md
 create mode 100644 crates/networking/extensions/round-based/Cargo.toml
 create mode 100644 crates/networking/extensions/round-based/src/lib.rs
 create mode 100644 crates/networking/extensions/round-based/src/tests.rs
 create mode 100644 crates/networking/extensions/round-based/tests/common/mod.rs
 create mode 100644 crates/networking/extensions/round-based/tests/rand_protocol.rs
 create mode 100644 crates/networking/src/behaviours.rs
 create mode 100644 crates/networking/src/blueprint_protocol/behaviour.rs
 create mode 100644 crates/networking/src/blueprint_protocol/handler.rs
 create mode 100644 crates/networking/src/blueprint_protocol/mod.rs
 create mode 100644 crates/networking/src/discovery/behaviour.rs
 create mode 100644 crates/networking/src/discovery/config.rs
 create mode 100644 crates/networking/src/discovery/mod.rs
 create mode 100644 crates/networking/src/discovery/peers.rs
 delete mode 100644 crates/networking/src/gossip.rs
 rename crates/{crypto/sp-core/src/sp_core_util.rs => networking/src/handlers/blueprint_protocol.rs} (100%)
 delete mode 100644 crates/networking/src/handlers/connections.rs
 delete mode 100644 crates/networking/src/handlers/dcutr.rs
 create mode 100644 crates/networking/src/handlers/discovery.rs
 delete mode 100644 crates/networking/src/handlers/gossip.rs
 delete mode 100644 crates/networking/src/handlers/identify.rs
 delete mode 100644 crates/networking/src/handlers/kadmelia.rs
 delete mode 100644 crates/networking/src/handlers/mdns.rs
 delete mode 100644 crates/networking/src/handlers/p2p.rs
 delete mode 100644 crates/networking/src/handlers/relay.rs
 delete mode 100644 crates/networking/src/messaging.rs
 delete mode 100644 crates/networking/src/networking.rs
 delete mode 100644 crates/networking/src/networking/tests.rs
 delete mode 100644 crates/networking/src/round_based_compat.rs
 create mode 100644 crates/networking/src/service.rs
 create mode 100644 crates/networking/src/service_handle.rs
 delete mode 100644 crates/networking/src/setup.rs
 create mode 100644 crates/networking/src/test_helpers.rs
 create mode 100644 crates/networking/src/tests/blueprint_protocol.rs
 create mode 100644 crates/networking/src/tests/discovery.rs
 create mode 100644 crates/networking/src/tests/gossip.rs
 create mode 100644 crates/networking/src/tests/handshake.rs
 create mode 100644 crates/networking/src/tests/mod.rs
 create mode 100644 crates/networking/src/types.rs

diff --git a/.cursor/rules/p2p.mdc b/.cursor/rules/p2p.mdc
new file mode 100644
index 000000000..f1302339c
--- /dev/null
+++ b/.cursor/rules/p2p.mdc
@@ -0,0 +1,46 @@
+---
+description: Peer to peer networking expert & Rust engineer
+globs: **/*.rs
+---
+# Peer to peer networking expert
+
+## P2P Development Cursor (Rust + libp2p)
+
+1. **Initial Analysis**
+   - Review input requirements and resources
+   - Identify minimal viable protocol components
+   - Review code that exists.
+   - Never build new when it already exists. Improve instead, remove, and optimize.
+   - Move tests to a `src/tests` directory, create it if it doesn't exist.
+ +2. **Implementation Flow** + - Start with concise implementations and build from there. + - Suggest improvements with sound reasoning. + - Leverage libp2p's modular design patterns + - Leverage existing code and improve it, optimize it. + - When a code snippet is provided, understand it, and adapt it with existing code. These are meant as resources not as copy/pastes. + +3. **Code Standards** + - Implement proper error handling with custom error types + - Prioritize concise and efficient code. + - Add relevant and detailed documentation + - Always put tests inside a `src/tests` directory + +4. **Efficiency Guidelines** + - Prefer bounded channels for peer message handling + - Implement connection pooling where appropriate + - Leverage existing libp2p protocols before custom ones + +5. **Review & Integration** + - Verify protocol compatibility + - Test network behaviour under various conditions + - Test in `src/tests` directory + - Ensure proper resource cleanup + - Document failure modes and recovery + +6. **Core Principles** + - Start minimal, expand as needed + - Test thoroughly between iterations + - Maintain clear protocol boundaries + - Document network assumptions and requirements + - \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0e1057e4f..6668e6c2d 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,6 @@ node_modules blueprints/**/dependencies/ blueprint.lock +blueprint.json +cache target diff --git a/Cargo.lock b/Cargo.lock index ab6d65353..8ed8ab724 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -99,7 +99,7 @@ dependencies = [ "getrandom 0.2.15", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -155,9 +155,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.59" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d37bc62b68c056e3742265ab73c73d413d07357909e0e4ea1e95453066a7469" +checksum = "3a754dbb534198644cb8355b8c23f4aaecf03670fb9409242be1fa1e25897ee9" dependencies = [ "alloy-primitives 0.8.21", "num_enum", @@ -376,7 +376,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives 0.8.21", "k256", - "rand", + "rand 0.8.5", "serde_json", "tempfile", "thiserror 2.0.11", @@ -398,7 +398,7 @@ dependencies = [ "hex-literal", "itoa", "proptest", - "rand", + "rand 0.8.5", "ruint", "serde", "tiny-keccak", @@ -423,7 +423,7 @@ dependencies = [ "keccak-asm", "paste", "proptest", - "rand", + "rand 0.8.5", "ruint", "rustc-hash 2.1.1", "serde", @@ -695,7 +695,7 @@ dependencies = [ "coins-bip32 0.12.0", "coins-bip39 0.12.0", "k256", - "rand", + "rand 0.8.5", "thiserror 2.0.11", ] @@ -1461,7 +1461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -1471,7 +1471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", "rayon", ] @@ -1482,7 +1482,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -1753,6 +1753,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "async-signal" version = "0.2.10" @@ -1859,11 +1870,11 @@ dependencies = [ [[package]] name = "auth-git2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3810b5af212b013fe7302b12d86616c6c39a48e18f2e4b812a5a9e5710213791" +checksum = "d55eead120c93036f531829cf9b85830a474e75ce71169680879d28078321ddc" dependencies = [ - "dirs", + "dirs 6.0.0", "git2", "terminal-prompt", ] @@ -1974,9 +1985,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.59.0" +version = "1.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da21a543bfc06001637785ec6ed7b4bec9a3d737f0abca3ddd8d7b962e71fb1" +checksum = "adc36035f7393a24719069c9a2f52e20972f7ee8472bd788e863968736acc449" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1996,9 +2007,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.59.0" +version = "1.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9f944ef032717596639cea4a2118a3a457268ef51bbb5fde9637e54c465da00" +checksum = "dc1cfe5e16b90421ea031f4c6348b534ef442e76f6bf4a1b2b592c12cc2c6af9" dependencies = [ "aws-credential-types", "aws-runtime", @@ -2019,9 +2030,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.8" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bc5bbd1e4a2648fd8c5982af03935972c24a2f9846b396de661d351ee3ce837" +checksum = "9bfe75fad52793ce6dec0dc3d4b1f388f038b5eb866c8d4d7f3a8e21b5ea5051" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -2531,9 +2542,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" +checksum = "47c79a94619fade3c0b887670333513a67ac28a6a7e653eb260bf0d4103db38d" dependencies = [ "cc", "glob", @@ -2616,7 +2627,6 @@ dependencies = [ "blueprint-build-utils", "blueprint-metadata", "eigensdk", - "gadget-client-networking", "gadget-clients", "gadget-config", "gadget-context-derive", @@ -2628,6 +2638,7 @@ dependencies = [ "gadget-logging", "gadget-macros", "gadget-networking", + "gadget-networking-round-based-extension", "gadget-runners", "gadget-std", "gadget-stores", @@ -2956,9 +2967,9 @@ dependencies = [ [[package]] name = "bridge-runtime-common" -version = "0.18.2" +version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86cf718057e18ce3e5f2c8e3fc318c38ad71d47ada91dc4b841c0f69c214ef04" +checksum = "789eb7841c8791991317ec4b6e56c119e5e1c2e480ad293b8502736fd7f64b2e" dependencies = [ "bp-header-chain", "bp-messages", @@ -2966,7 +2977,6 @@ dependencies = [ "bp-polkadot-core", "bp-relayers", "bp-runtime", - "bp-xcm-bridge-hub", "frame-support", "frame-system", "log", @@ -3258,9 +3268,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.13" +version = "1.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7777341816418c02e033934a09f20dc0ccaf65a5201ef8a450ae0105a573fda" +checksum = "0c3d1b2e905a3a7b00a6141adb0e4c0bb941d11caf55349d863942a1cc44e3c9" dependencies = [ "jobserver", "libc", @@ -3386,9 +3396,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.29" +version = "4.5.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acebd8ad879283633b343856142139f2da2317c96b05b4dd6181c61e2480184" +checksum = "92b7b18d71fad5313a1e320fa9897994228ce274b60faa4d694fe0ea89cd9e6d" dependencies = [ "clap_builder", "clap_derive", @@ -3406,9 +3416,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.29" +version = "4.5.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ba32cbda51c7e1dfd49acc1457ba1a7dec5b64fe360e828acb13ca8dc9c2f9" +checksum = "a35db2071778a7344791a4fb4f95308b5673d219dee3ae348b86642574ecc90c" dependencies = [ "anstream", "anstyle", @@ -3497,7 +3507,7 @@ dependencies = [ "hmac 0.12.1", "once_cell", "pbkdf2 0.12.2", - "rand", + "rand 0.8.5", "sha2 0.10.8", "thiserror 1.0.69", ] @@ -3513,7 +3523,7 @@ dependencies = [ "hmac 0.12.1", "once_cell", "pbkdf2 0.12.2", - "rand", + "rand 0.8.5", "sha2 0.10.8", "thiserror 1.0.69", ] @@ -3924,6 +3934,24 @@ dependencies = [ "chrono", ] +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -3971,7 +3999,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -3983,7 +4011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -4174,9 +4202,9 @@ dependencies = [ [[package]] name = "cumulus-pallet-xcmp-queue" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f788bdac9474795ea13ba791b55798fb664b2e3da8c3a7385b480c9af4e6539" +checksum = "cbc44222c528b88dcc6e921e7a0dc94d66b5895aab9e9d9db8798fc62f7ccd40" dependencies = [ "bounded-collections", "bp-xcm-bridge-hub-router", @@ -4369,9 +4397,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc49567e08c72902f4cbc7242ee8d874ec9cbe97fbabf77b4e0e1f447513e13a" +checksum = "8bc580dceb395cae0efdde0a88f034cfd8a276897e40c693a7b87bed17971d33" dependencies = [ "cc", "cxxbridge-cmd", @@ -4383,9 +4411,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe46b5309c99e9775e7a338c98e4097455f52db5b684fd793ca22848fde6e371" +checksum = "49d8c1baedad72a7efda12ad8d7ad687b3e7221dfb304a12443fd69e9de8bb30" dependencies = [ "cc", "codespan-reporting", @@ -4397,9 +4425,9 @@ dependencies = [ [[package]] name = "cxxbridge-cmd" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4315c4ce8d23c26d87f2f83698725fd5718d8e6ace4a9093da2664d23294d372" +checksum = "e43afb0e3b2ef293492a31ecd796af902112460d53e5f923f7804f348a769f9c" dependencies 
= [ "clap", "codespan-reporting", @@ -4410,15 +4438,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55d69deb3a92f610a60ecc524a72c7374b6dc822f8fb7bb4e5d9473f10530c4" +checksum = "0257ad2096a2474fe877e9e055ab69603851c3d6b394efcc7e0443899c2492ce" [[package]] name = "cxxbridge-macro" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bee7a1d9b5091462002c2b8de2a4ed0f0fde011d503cc272633f66075bd5141" +checksum = "b46cbd7358a46b760609f1cb5093683328e58ca50e594a308716f5403fdc03e5" dependencies = [ "proc-macro2", "quote", @@ -4653,7 +4681,16 @@ version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ - "dirs-sys", + "dirs-sys 0.4.1", +] + +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys 0.5.0", ] [[package]] @@ -4674,10 +4711,22 @@ checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", "option-ext", - "redox_users", + "redox_users 0.4.6", "windows-sys 0.48.0", ] +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users 0.5.0", + "windows-sys 0.59.0", +] + [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -4685,7 +4734,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", - "redox_users", + "redox_users 0.4.6", "winapi", ] @@ -4823,7 +4872,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2 0.10.8", "subtle", @@ -4842,7 +4891,7 @@ dependencies = [ "hashbrown 0.14.5", "hex", "pkcs8", - "rand_core", + "rand_core 0.6.4", "serde", "sha2 0.10.8", "zeroize", @@ -5191,7 +5240,7 @@ dependencies = [ "group", "pem-rfc7468", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sec1", "serdect", "subtle", @@ -5239,7 +5288,7 @@ dependencies = [ "hex", "k256", "log", - "rand", + "rand 0.8.5", "rlp", "serde", "sha3", @@ -5366,9 +5415,9 @@ checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "erased-serde" @@ -5425,7 +5474,7 @@ dependencies = [ "hex", "hmac 0.12.1", "pbkdf2 0.11.0", - "rand", + "rand 0.8.5", "scrypt 0.10.0", "serde", "serde_json", @@ -5598,7 +5647,7 @@ dependencies = [ "num_enum", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "serde", "serde_json", @@ -5703,7 +5752,7 @@ dependencies = [ "elliptic-curve", "eth-keystore", "ethers-core", - "rand", + "rand 0.8.5", "sha2 0.10.8", "thiserror 1.0.69", "tracing", @@ -5717,7 +5766,7 @@ checksum = 
"66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd" dependencies = [ "cfg-if 1.0.0", "const-hex", - "dirs", + "dirs 5.0.1", "dunce", "ethers-core", "glob", @@ -5828,7 +5877,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -5903,7 +5952,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -6418,17 +6467,6 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" -[[package]] -name = "futures-ticker" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" -dependencies = [ - "futures", - "futures-timer", - "instant", -] - [[package]] name = "futures-timer" version = "3.0.3" @@ -6609,22 +6647,6 @@ dependencies = [ "url", ] -[[package]] -name = "gadget-client-networking" -version = "0.1.0" -dependencies = [ - "gadget-client-core", - "gadget-config", - "gadget-crypto", - "gadget-logging", - "gadget-networking", - "gadget-std", - "libp2p", - "serde", - "serde_json", - "thiserror 2.0.11", -] - [[package]] name = "gadget-client-tangle" version = "0.1.0" @@ -6654,7 +6676,6 @@ dependencies = [ "gadget-client-core", "gadget-client-eigenlayer", "gadget-client-evm", - "gadget-client-networking", "gadget-client-tangle", "gadget-std", "thiserror 2.0.11", @@ -7118,13 +7139,19 @@ dependencies = [ name = "gadget-networking" version = "0.1.0" dependencies = [ + "anyhow", "async-trait", "auto_impl", "bincode", + "blake3", + "crossbeam-channel", "dashmap", + "fastrand", "futures", "gadget-crypto", + "gadget-crypto-core", "gadget-logging", + "gadget-networking", "gadget-std", "hex", "itertools 0.14.0", @@ -7133,9 +7160,35 @@ dependencies = [ "libp2p", "lru-mem", "parking_lot 0.12.3", + "serde", + "serde_json", + "thiserror 2.0.11", + "tokio", + "tokio-stream", + "tracing", + "tracing-subscriber 0.3.19", +] + +[[package]] +name = "gadget-networking-round-based-extension" +version = "0.1.0" +dependencies = [ + "crossbeam", + "crossbeam-channel", + "dashmap", + "futures", + "gadget-crypto", + "gadget-crypto-core", + "gadget-networking", + "generic-array", + "hex", + "libp2p", + "rand 0.8.5", + "rand_dev", "round-based", "serde", "serde_json", + "sha2 0.10.8", "thiserror 2.0.11", "tokio", "tracing", @@ -7184,7 +7237,7 @@ dependencies = [ "gadget-runner-core", "gadget-std", "gadget-utils", - "rand", + "rand 0.8.5", "thiserror 2.0.11", ] @@ -7225,7 +7278,7 @@ dependencies = [ "async-trait", "colored", "num-traits", - "rand", + "rand 0.8.5", "rayon", "thiserror 2.0.11", "tokio", @@ -7261,7 +7314,7 @@ dependencies = [ "alloy-transport", "async-trait", "cargo-tangle", - "dirs", + "dirs 5.0.1", "futures", "gadget-blueprint-serde", "gadget-client-tangle", @@ -7378,12 +7431,26 @@ dependencies = [ "url", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" 
version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ + "serde", "typenum", "version_check", "zeroize", @@ -7420,8 +7487,8 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" dependencies = [ - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", ] [[package]] @@ -7765,7 +7832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -7872,6 +7939,15 @@ dependencies = [ "fxhash", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "heck" version = "0.4.1" @@ -7934,10 +8010,11 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.3" +version = "0.25.0-alpha.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" +checksum = "1d00147af6310f4392a31680db52a3ed45a2e0f68eb18e8c3fe5537ecc96d9e2" dependencies = [ + "async-recursion", "async-trait", "cfg-if 1.0.0", "data-encoding", @@ -7948,9 +8025,9 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.9.0", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tinyvec", "tokio", "tracing", @@ -7959,21 +8036,21 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.3" +version = "0.25.0-alpha.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" +checksum = "5762f69ebdbd4ddb2e975cd24690bf21fe6b2604039189c26acddbc427f12887" dependencies = [ "cfg-if 1.0.0", "futures-util", "hickory-proto", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot 0.12.3", - "rand", + "rand 0.9.0", "resolv-conf", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -8496,18 +8573,20 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.14.3" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 0.2.12", - "hyper 0.14.32", + "http 1.2.0", + "http-body-util", + "hyper 1.6.0", + "hyper-util", "log", - "rand", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -9101,7 +9180,7 @@ dependencies = [ "base64 0.22.1", "js-sys", "pem 3.0.4", - "ring 0.17.8", + "ring 0.17.9", "serde", "serde_json", "simple_asn1 0.6.3", @@ -9241,9 +9320,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p" -version = "0.54.1" +version = "0.55.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +checksum = "b72dc443ddd0254cb49a794ed6b6728400ee446a0f7ab4a07d0209ee98de20e9" dependencies = [ "bytes", "either", @@ -9274,30 +9353,28 @@ dependencies = [ 
"multiaddr", "pin-project 1.1.9", "rw-stream-sink", - "thiserror 1.0.69", + "thiserror 2.0.11", ] [[package]] name = "libp2p-allow-block-list" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +checksum = "38944b7cb981cc93f2f0fb411ff82d0e983bd226fbcc8d559639a3a73236568b" dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "void", ] [[package]] name = "libp2p-autonat" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" +checksum = "e297bfc6cabb70c6180707f8fa05661b77ecb9cb67e8e8e1c469301358fa21d0" dependencies = [ "async-trait", "asynchronous-codec", - "bytes", "either", "futures", "futures-bounded", @@ -9308,31 +9385,29 @@ dependencies = [ "libp2p-swarm", "quick-protobuf", "quick-protobuf-codec", - "rand", - "rand_core", - "thiserror 1.0.69", + "rand 0.8.5", + "rand_core 0.6.4", + "thiserror 2.0.11", "tracing", - "void", "web-time", ] [[package]] name = "libp2p-connection-limits" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +checksum = "efe9323175a17caa8a2ed4feaf8a548eeef5e0b72d03840a0eab4bcb0210ce1c" dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "void", ] [[package]] name = "libp2p-core" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +checksum = "193c75710ba43f7504ad8f58a62ca0615b1d7e572cb0f1780bc607252c39e9ef" dependencies = [ "either", "fnv", @@ -9346,21 +9421,19 @@ dependencies = [ "parking_lot 0.12.3", "pin-project 1.1.9", "quick-protobuf", - "rand", + "rand 0.8.5", "rw-stream-sink", - "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "unsigned-varint 0.8.0", - "void", "web-time", ] [[package]] name = "libp2p-dcutr" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3236a2e24cbcf2d05b398b003ed920e1e8cedede13784d90fa3961b109647ce0" +checksum = "0a6c2c365b66866da34d06dfe41e001b49b9cfb5cafff6b9c4718eb2da7e35a4" dependencies = [ "asynchronous-codec", "either", @@ -9373,17 +9446,16 @@ dependencies = [ "lru 0.12.5", "quick-protobuf", "quick-protobuf-codec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", - "void", "web-time", ] [[package]] name = "libp2p-dns" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" +checksum = "1b780a1150214155b0ed1cdf09fbd2e1b0442604f9146a431d1b21d23eef7bd7" dependencies = [ "async-trait", "futures", @@ -9397,10 +9469,11 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" +checksum = "d558548fa3b5a8e9b66392f785921e363c57c05dcadfda4db0d41ae82d313e4a" dependencies = [ + "async-channel", "asynchronous-codec", "base64 0.22.1", "byteorder", @@ -9408,8 +9481,9 @@ dependencies = [ "either", "fnv", "futures", - "futures-ticker", + "futures-timer", "getrandom 0.2.15", + "hashlink", "hex_fmt", 
"libp2p-core", "libp2p-identity", @@ -9417,20 +9491,19 @@ dependencies = [ "prometheus-client", "quick-protobuf", "quick-protobuf-codec", - "rand", + "rand 0.8.5", "regex", + "serde", "sha2 0.10.8", - "smallvec", "tracing", - "void", "web-time", ] [[package]] name = "libp2p-identify" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" +checksum = "e8c06862544f02d05d62780ff590cc25a75f5c2b9df38ec7a370dcae8bb873cf" dependencies = [ "asynchronous-codec", "either", @@ -9440,13 +9513,11 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru 0.12.5", "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", - "void", ] [[package]] @@ -9460,7 +9531,8 @@ dependencies = [ "hkdf", "multihash", "quick-protobuf", - "rand", + "rand 0.8.5", + "serde", "sha2 0.10.8", "thiserror 1.0.69", "tracing", @@ -9469,11 +9541,10 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.46.2" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" +checksum = "2bab0466a27ebe955bcbc27328fae5429c5b48c915fd6174931414149802ec23" dependencies = [ - "arrayvec 0.7.6", "asynchronous-codec", "bytes", "either", @@ -9486,42 +9557,40 @@ dependencies = [ "libp2p-swarm", "quick-protobuf", "quick-protobuf-codec", - "rand", + "rand 0.8.5", + "serde", "sha2 0.10.8", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", - "uint 0.9.5", - "void", + "uint 0.10.0", "web-time", ] [[package]] name = "libp2p-mdns" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +checksum = "11d0ba095e1175d797540e16b62e7576846b883cb5046d4159086837b36846cc" dependencies = [ - "data-encoding", "futures", "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand", + "rand 0.8.5", "smallvec", "socket2", "tokio", "tracing", - "void", ] [[package]] name = "libp2p-metrics" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +checksum = "2ce58c64292e87af624fcb86465e7dd8342e46a388d71e8fec0ab37ee789630a" dependencies = [ "futures", "libp2p-core", @@ -9540,13 +9609,12 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +checksum = "afcc133e0f3cea07acde6eb8a9665cb11b600bd61110b010593a0210b8153b16" dependencies = [ "asynchronous-codec", "bytes", - "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", @@ -9554,11 +9622,10 @@ dependencies = [ "multihash", "once_cell", "quick-protobuf", - "rand", - "sha2 0.10.8", + "rand 0.8.5", "snow", "static_assertions", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "x25519-dalek", "zeroize", @@ -9566,51 +9633,47 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" +checksum = 
"7b2529993ff22deb2504c0130a58b60fb77f036be555053922db1a0490b5798b" dependencies = [ - "either", "futures", "futures-timer", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand", + "rand 0.8.5", "tracing", - "void", "web-time", ] [[package]] name = "libp2p-quic" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" +checksum = "41432a159b00424a0abaa2c80d786cddff81055ac24aa127e0cf375f7858d880" dependencies = [ - "bytes", "futures", "futures-timer", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-tls", - "parking_lot 0.12.3", "quinn", - "rand", - "ring 0.17.8", + "rand 0.8.5", + "ring 0.17.9", "rustls 0.23.23", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] [[package]] name = "libp2p-relay" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10df23d7f5b5adcc129f4a69d6fbd05209e356ccf9e8f4eb10b2692b79c77247" +checksum = "08a41e346681395877118c270cf993f90d57d045fbf0913ca2f07b59ec6062e4" dependencies = [ "asynchronous-codec", "bytes", @@ -9623,41 +9686,37 @@ dependencies = [ "libp2p-swarm", "quick-protobuf", "quick-protobuf-codec", - "rand", + "rand 0.8.5", "static_assertions", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", - "void", "web-time", ] [[package]] name = "libp2p-request-response" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" +checksum = "548fe44a80ff275d400f1b26b090d441d83ef73efabbeb6415f4ce37e5aed865" dependencies = [ "async-trait", "cbor4ii", "futures", "futures-bounded", - "futures-timer", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand", + "rand 0.8.5", "serde", "smallvec", "tracing", - "void", - "web-time", ] [[package]] name = "libp2p-swarm" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +checksum = "803399b4b6f68adb85e63ab573ac568154b193e9a640f03e0f2890eabbcb37f8" dependencies = [ "either", "fnv", @@ -9669,11 +9728,10 @@ dependencies = [ "lru 0.12.5", "multistream-select", "once_cell", - "rand", + "rand 0.8.5", "smallvec", "tokio", "tracing", - "void", "web-time", ] @@ -9691,16 +9749,15 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +checksum = "65346fb4d36035b23fec4e7be4c320436ba53537ce9b6be1d1db1f70c905cad0" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "libp2p-identity", "socket2", "tokio", "tracing", @@ -9708,28 +9765,28 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +checksum = "dcaebc1069dea12c5b86a597eaaddae0317c2c2cb9ec99dc94f82fd340f5c78b" dependencies = [ "futures", "futures-rustls", "libp2p-core", "libp2p-identity", "rcgen", - "ring 0.17.8", + "ring 0.17.9", "rustls 0.23.23", "rustls-webpki 0.101.7", - "thiserror 1.0.69", + "thiserror 2.0.11", "x509-parser", "yasna", ] [[package]] name = "libp2p-upnp" -version = "0.3.0" +version = "0.4.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" dependencies = [ "futures", "futures-timer", @@ -9738,19 +9795,18 @@ dependencies = [ "libp2p-swarm", "tokio", "tracing", - "void", ] [[package]] name = "libp2p-yamux" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +checksum = "f15df094914eb4af272acf9adaa9e287baa269943f32ea348ba29cfb9bfc60d8" dependencies = [ "either", "futures", "libp2p-core", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "yamux 0.12.1", "yamux 0.13.4", @@ -9780,7 +9836,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand", + "rand 0.8.5", "serde", "sha2 0.9.9", "typenum", @@ -9862,12 +9918,6 @@ dependencies = [ "cc", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linregress" version = "0.5.4" @@ -9965,6 +10015,19 @@ version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if 1.0.0", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber 0.3.19", +] + [[package]] name = "lru" version = "0.8.1" @@ -9983,15 +10046,6 @@ dependencies = [ "hashbrown 0.15.2", ] -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "lru-mem" version = "0.3.0" @@ -10158,7 +10212,7 @@ checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ "byteorder", "keccak", - "rand_core", + "rand_core 0.6.4", "zeroize", ] @@ -10208,7 +10262,7 @@ dependencies = [ "ordered-float 4.6.0", "quanta", "radix_trie", - "rand", + "rand 0.8.5", "rand_xoshiro", "sketches-ddsketch", ] @@ -10264,6 +10318,25 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot 0.12.3", + "portable-atomic", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid 1.13.1", +] + [[package]] name = "multi-stash" version = "0.2.0" @@ -10346,7 +10419,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -10588,9 +10661,9 @@ dependencies = [ [[package]] name = "ntex-h2" -version = "1.8.4" +version = "1.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001e063ea58426e72340ba87a66498fc0d59659ee527943d64327b5cd1245c06" +checksum = 
"8f17e4eec506679d78f67b32c42ee4e35d0dcd97451251a4d891184185187520" dependencies = [ "bitflags 2.8.0", "fxhash", @@ -11002,9 +11075,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.70" +version = "0.10.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" +checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" dependencies = [ "bitflags 2.8.0", "cfg-if 1.0.0", @@ -11043,9 +11116,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.105" +version = "0.9.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" +checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", @@ -11345,9 +11418,9 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "39.0.0" +version = "39.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6945b078919acb14d126490e4b0973a688568b30142476ca69c6df2bed27ad" +checksum = "bcb1f72d7048fbd11e884b4693f7d438b8202340ff252e2a402e04c638fe2d02" dependencies = [ "docify", "frame-benchmarking", @@ -11561,7 +11634,7 @@ dependencies = [ "pallet-balances", "pallet-session", "parity-scale-codec", - "rand", + "rand 0.8.5", "scale-info", "sp-runtime", "sp-staking 36.0.0", @@ -11617,7 +11690,7 @@ dependencies = [ "pallet-contracts-uapi", "parity-scale-codec", "paste", - "rand", + "rand 0.8.5", "scale-info", "serde", "smallvec", @@ -11790,7 +11863,7 @@ dependencies = [ "log", "pallet-election-provider-support-benchmarking", "parity-scale-codec", - "rand", + "rand 0.8.5", "scale-info", "sp-arithmetic", "sp-core", @@ -12632,7 +12705,7 @@ dependencies = [ "pallet-session", "pallet-staking", "parity-scale-codec", - "rand", + "rand 0.8.5", "sp-runtime", "sp-session", ] @@ -12661,7 +12734,7 @@ dependencies = [ "frame-system", "log", "parity-scale-codec", - "rand_chacha", + "rand_chacha 0.3.1", "scale-info", "sp-arithmetic", "sp-io", @@ -12994,9 +13067,9 @@ dependencies = [ [[package]] name = "pallet-xcm-bridge-hub" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f336403f9e9bf22a0e1fdb90aa5093c52599c9a0639591fbcc1e979b58862d1b" +checksum = "3bdb76fff08633830063a4cb36664f0cf2f926ac0da02ee439d4f521763e26b7" dependencies = [ "bp-messages", "bp-runtime", @@ -13104,8 +13177,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -13266,7 +13339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -13277,7 +13350,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -13470,7 +13543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] @@ -13760,8 +13833,8 @@ 
dependencies = [ "polkadot-parachain-primitives", "polkadot-primitives 16.0.0", "polkadot-runtime-metrics", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "scale-info", "serde", "sp-api", @@ -14276,7 +14349,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -14379,9 +14452,9 @@ dependencies = [ [[package]] name = "proc-macro-warning" -version = "1.0.2" +version = "1.84.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "834da187cfe638ae8abb0203f0b33e5ccdb02a28e7199f2f47b3e2754f50edca" +checksum = "75eea531cfcd120e0851a3f8aed42c4841f78c889eefafd96339c72677ae42c3" dependencies = [ "proc-macro2", "quote", @@ -14437,8 +14510,8 @@ dependencies = [ "bitflags 2.8.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -14448,9 +14521,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", "prost-derive", @@ -14458,12 +14531,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.98", @@ -14471,18 +14544,18 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ "prost", ] [[package]] name = "psm" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200b9ff220857e53e184257720a14553b2f4aa02577d2ed9842d45d4b9654810" +checksum = "f58e5423e24c18cc840e1c98370b3993c6649cd1678b4d24318bcf0a083cbe88" dependencies = [ "cc", ] @@ -14557,8 +14630,8 @@ checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", "getrandom 0.2.15", - "rand", - "ring 0.17.8", + "rand 0.8.5", + "ring 0.17.9", "rustc-hash 2.1.1", "rustls 0.23.23", "rustls-pki-types", @@ -14571,9 +14644,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" dependencies = [ "cfg_aliases", "libc", @@ -14615,11 +14688,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "serde", ] +[[package]] +name = "rand" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.1", + "zerocopy 0.8.18", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -14627,7 +14711,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.1", ] [[package]] @@ -14639,13 +14733,34 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a88e0da7a2c97baa202165137c158d0a2e824ac465d13d81046727b34cb247d3" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.18", +] + +[[package]] +name = "rand_dev" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbee97c27dada05f03db49ffe6516872f6c926e0fd525f9ce0cb3c051adf145c" +dependencies = [ + "hex", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + [[package]] name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -14654,14 +14769,14 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] name = "raw-cpuid" -version = "11.3.0" +version = "11.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" +checksum = "529468c1335c1c03919960dfefdb1b3648858c20d7ec2d0663e728e4a717efbc" dependencies = [ "bitflags 2.8.0", ] @@ -14748,6 +14863,17 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "redox_users" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" +dependencies = [ + "getrandom 0.2.15", + "libredox", + "thiserror 2.0.11", +] + [[package]] name = "ref-cast" version = "1.0.23" @@ -15019,15 +15145,14 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "e75ec5e92c4d8aede845126adc388046234541629e76029599ed35a003c7ed24" dependencies = [ "cc", "cfg-if 1.0.0", "getrandom 0.2.15", "libc", - "spin 0.9.8", "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -15090,6 +15215,8 @@ dependencies = [ "phantom-type", "round-based-derive", "thiserror 2.0.11", + "tokio", + "tokio-stream", "tracing", ] @@ -15138,7 +15265,7 @@ dependencies = [ "parity-scale-codec", "primitive-types 0.12.2", "proptest", - "rand", + "rand 0.8.5", "rlp", "ruint-macro", "serde", @@ -15180,7 +15307,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "pbkdf2 0.12.2", - "rand", + "rand 0.8.5", "scrypt 0.11.0", "serde", "serde_json", @@ -15293,7 +15420,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.8", + "ring 0.17.9", "rustls-webpki 0.101.7", "sct", ] @@ -15307,7 +15434,7 @@ dependencies = [ "aws-lc-rs", "log", "once_cell", - "ring 0.17.8", + "ring 0.17.9", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -15411,7 +15538,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.8", + "ring 0.17.9", "untrusted 0.9.0", ] @@ -15422,7 +15549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "aws-lc-rs", - "ring 0.17.8", + "ring 0.17.9", "rustls-pki-types", "untrusted 0.9.0", ] @@ -15766,13 +15893,19 @@ dependencies = [ "curve25519-dalek", "getrandom_or_panic", "merlin", - "rand_core", + "rand_core 0.6.4", "serde_bytes", "sha2 0.10.8", "subtle", "zeroize", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -15815,7 +15948,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.8", + "ring 0.17.9", "untrusted 0.9.0", ] @@ -15850,7 +15983,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" dependencies = [ "bitcoin_hashes 0.14.0", - "rand", + "rand 0.8.5", "secp256k1-sys 0.10.1", ] @@ -16274,7 +16407,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -16360,9 +16493,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" dependencies = [ "serde", ] @@ -16431,8 +16564,8 @@ dependencies = [ "pbkdf2 0.12.2", "pin-project 1.1.9", "poly1305", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "ruzstd", "schnorrkel", "serde", @@ -16474,8 +16607,8 @@ dependencies = [ "lru 0.12.5", "parking_lot 0.12.3", "pin-project 1.1.9", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "serde", "serde_json", "siphasher", @@ -16495,8 +16628,8 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek", - "rand_core", - "ring 0.17.8", + "rand_core 0.6.4", + "ring 0.17.9", "rustc_version 0.4.1", "sha2 0.10.8", "subtle", @@ -16589,7 +16722,7 @@ dependencies = [ "hex", "lazy_static", "parity-scale-codec", - "rand", + "rand 0.8.5", "scale-info", "snowbridge-amcl", "zeroize", @@ -16847,7 +16980,7 @@ dependencies = [ "futures", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", ] @@ -17083,7 +17216,7 @@ dependencies = [ "parking_lot 0.12.3", "paste", "primitive-types 0.12.2", - "rand", + "rand 0.8.5", "scale-info", "schnorrkel", "secp256k1 0.28.2", @@ -17357,7 +17490,7 @@ 
dependencies = [ "num-traits", "parity-scale-codec", "paste", - "rand", + "rand 0.8.5", "scale-info", "serde", "simple-mermaid", @@ -17457,7 +17590,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "smallvec", "sp-core", "sp-externalities", @@ -17479,7 +17612,7 @@ dependencies = [ "ed25519-dalek", "hkdf", "parity-scale-codec", - "rand", + "rand 0.8.5", "scale-info", "sha2 0.10.8", "sp-api", @@ -17575,7 +17708,7 @@ dependencies = [ "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "scale-info", "schnellru", "sp-core", @@ -17746,9 +17879,9 @@ dependencies = [ [[package]] name = "staging-xcm-builder" -version = "17.0.3" +version = "17.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6f7a92cfaec55a5ed0f9cbbb9076aa8ec0aff1ba90b9804cc5c8f2369fde59c" +checksum = "e1693870a07e3fd8115c02b44e1223ce149b6cfa0b60f59a1c0fbc26637766a5" dependencies = [ "frame-support", "frame-system", @@ -18118,7 +18251,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11297baafe5fa0c99d5722458eac6a5e25c01eb1b8e5cd137f54079093daa7a4" dependencies = [ - "dirs", + "dirs 5.0.1", "fs2", "hex", "once_cell", @@ -18271,6 +18404,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tangle-subxt" version = "0.11.0" @@ -18544,9 +18683,9 @@ dependencies = [ "arrayref", "constcat", "digest 0.10.7", - "rand", - "rand_chacha", - "rand_core", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "sha2 0.10.8", "sha3", "thiserror 1.0.69", @@ -18810,7 +18949,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project 1.1.9", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util 0.7.13", @@ -19023,7 +19162,7 @@ dependencies = [ "http 0.2.12", "httparse", "log", - "rand", + "rand 0.8.5", "rustls 0.21.12", "sha1", "thiserror 1.0.69", @@ -19043,7 +19182,7 @@ dependencies = [ "http 1.2.0", "httparse", "log", - "rand", + "rand 0.8.5", "rustls 0.23.23", "rustls-pki-types", "sha1", @@ -19065,7 +19204,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if 1.0.0", "digest 0.10.7", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -19301,12 +19440,6 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" - [[package]] name = "vsimd" version = "0.8.0" @@ -19328,9 +19461,9 @@ dependencies = [ "arrayref", "constcat", "digest 0.10.7", - "rand", - "rand_chacha", - "rand_core", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "sha2 0.10.8", "sha3", "thiserror 1.0.69", @@ -19747,7 +19880,7 @@ dependencies = [ "memfd", "memoffset 0.8.0", "paste", - "rand", + "rand 0.8.5", "rustix 0.36.17", "wasmtime-asm-macros", "wasmtime-environ", @@ -19912,6 +20045,16 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + 
"windows-core 0.58.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -19937,12 +20080,25 @@ version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-implement", - "windows-interface", + "windows-implement 0.57.0", + "windows-interface 0.57.0", "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + [[package]] name = "windows-implement" version = "0.57.0" @@ -19954,6 +20110,17 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "windows-interface" version = "0.57.0" @@ -19965,6 +20132,17 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "windows-registry" version = "0.2.0" @@ -20302,7 +20480,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "serde", "zeroize", ] @@ -20417,7 +20595,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project 1.1.9", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -20432,7 +20610,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project 1.1.9", - "rand", + "rand 0.8.5", "static_assertions", "web-time", ] @@ -20489,7 +20667,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79386d31a42a4996e3336b0919ddb90f81112af416270cff95b5f5af22b839c2" +dependencies = [ + "zerocopy-derive 0.8.18", ] [[package]] @@ -20503,6 +20690,17 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76331675d372f91bf8d17e13afbd5fe639200b73d01f0fc748bb059f9cca2db7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "zerofrom" version = "0.1.5" diff --git a/Cargo.toml b/Cargo.toml index 520139240..b9d76f626 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,7 @@ [workspace] resolver = "2" -members = [ - "cli", - "blueprints/*", - "crates/*", -] -exclude = [ - "blueprints/incredible-squaring-symbiotic", -] +members = ["cli", "blueprints/*", "crates/*"] +exclude = ["blueprints/incredible-squaring-symbiotic"] [workspace.package] authors = ["Tangle Network"] @@ -28,6 
+22,7 @@ all = { level = "deny", priority = -1 } single_match_else = "allow" uninlined_format_args = "allow" needless_late_init = "allow" +struct_excessive_bools = "allow" [workspace.lints.rustdoc] broken_intra_doc_links = "deny" @@ -68,7 +63,6 @@ gadget-clients = { version = "0.1.0", path = "./crates/clients", default-feature gadget-client-core = { version = "0.1.0", path = "./crates/clients/core", default-features = false } gadget-client-eigenlayer = { version = "0.1.0", path = "./crates/clients/eigenlayer", default-features = false } gadget-client-evm = { version = "0.1.0", path = "./crates/clients/evm", default-features = false } -gadget-client-networking = { version = "0.1.0", path = "./crates/clients/networking", default-features = false } gadget-client-tangle = { version = "0.1.0", path = "./crates/clients/tangle", default-features = false } gadget-contexts = { version = "0.1.0", path = "./crates/contexts", default-features = false } @@ -104,9 +98,12 @@ gadget-runner-symbiotic = { version = "0.1.0", path = "./crates/runners/symbioti gadget-config = { version = "0.1.0", path = "./crates/config", default-features = false } gadget-keystore = { version = "0.1.0", path = "./crates/keystore", default-features = false } gadget-logging = { version = "0.1.0", path = "./crates/logging", default-features = false } -gadget-networking = { version = "0.1.0", path = "./crates/networking", default-features = false } gadget-std = { version = "0.1.0", path = "./crates/std", default-features = false } +# P2P +gadget-networking = { version = "0.1.0", path = "./crates/networking", default-features = false } +gadget-networking-round-based-extension = { version = "0.1.0", path = "./crates/networking/extensions/round-based", default-features = false } + # Utilities gadget-utils = { version = "0.1.0", path = "./crates/utils", default-features = false } gadget-utils-evm = { version = "0.1.0", path = "./crates/utils/evm", default-features = false } @@ -140,11 +137,14 @@ sp-runtime = { version = "39.0.0", default-features = false } # Async & Runtime async-trait = { version = "0.1.86", default-features = false } +crossbeam = { version = "0.8", default-features = false } +crossbeam-channel = { version = "0.5", default-features = false } futures = { version = "0.3.30", default-features = false } futures-util = { version = "0.3.31", default-features = false } tokio = { version = "1.40", default-features = false } tokio-util = { version = "0.7.12", default-features = false } tokio-cron-scheduler = "0.13.0" +tokio-stream = { version = "0.1.17", default-features = false } # CLI & Configuration cargo-generate = { version = "0.22.1", default-features = false } diff --git a/blueprints/examples/src/lib.rs b/blueprints/examples/src/lib.rs index 05d044406..0226a863d 100644 --- a/blueprints/examples/src/lib.rs +++ b/blueprints/examples/src/lib.rs @@ -1,6 +1,5 @@ pub mod eigen_context; pub mod periodic_web_poller; -pub mod raw_tangle_events; pub mod services_context; #[cfg(test)] diff --git a/blueprints/examples/src/main.rs b/blueprints/examples/src/main.rs index 793a20c55..d1a5b18b9 100644 --- a/blueprints/examples/src/main.rs +++ b/blueprints/examples/src/main.rs @@ -1,4 +1,4 @@ -use blueprint_examples::{eigen_context, periodic_web_poller, raw_tangle_events, services_context}; +use blueprint_examples::{eigen_context, periodic_web_poller, services_context}; use blueprint_sdk::alloy::primitives::Address; use blueprint_sdk::logging::info; use blueprint_sdk::runners::core::runner::BlueprintRunner; @@ -19,7 +19,6 @@ async fn 
main() -> Result<(), Box> { "tangle" => { info!("Running Tangle examples"); BlueprintRunner::new(TangleConfig::default(), env.clone()) - .job(raw_tangle_events::constructor(env.clone()).await?) .job(periodic_web_poller::constructor("1/2 * * * * *")) .job(services_context::constructor(env.clone()).await?) .run() diff --git a/blueprints/examples/src/raw_tangle_events.rs b/blueprints/examples/src/raw_tangle_events.rs deleted file mode 100644 index dbb249a33..000000000 --- a/blueprints/examples/src/raw_tangle_events.rs +++ /dev/null @@ -1,64 +0,0 @@ -use blueprint_sdk::config::GadgetConfiguration; -use blueprint_sdk::contexts::keystore::KeystoreContext; -use blueprint_sdk::crypto::sp_core::SpSr25519; -use blueprint_sdk::event_listeners::core::InitializableEventHandler; -use blueprint_sdk::event_listeners::tangle::events::{TangleEvent, TangleEventListener}; -use blueprint_sdk::job; -use blueprint_sdk::keystore::backends::Backend; -use blueprint_sdk::logging::info; -use blueprint_sdk::macros::contexts::{ServicesContext, TangleClientContext}; -use blueprint_sdk::tangle_subxt::tangle_testnet_runtime::api; - -#[derive(Clone, TangleClientContext, ServicesContext)] -pub struct MyContext { - #[config] - sdk_config: GadgetConfiguration, - #[call_id] - call_id: Option, -} - -pub async fn constructor( - env: GadgetConfiguration, -) -> color_eyre::Result { - let signer = env - .clone() - .keystore() - .first_local::() - .map_err(|e| color_eyre::eyre::eyre!(e))?; - - info!("Starting the event watcher for {:?} ...", signer.0); - RawEventHandler::new( - &env, - MyContext { - sdk_config: env.clone(), - call_id: None, - }, - ) - .await - .map_err(|e| color_eyre::eyre::eyre!(e)) -} - -#[job( - id = 2, - event_listener( - listener = TangleEventListener, - ), -)] -pub fn raw(event: TangleEvent, context: MyContext) -> Result { - if let Some(balance_transfer) = event - .evt - .as_event::() - .ok() - .flatten() - { - info!("Found a balance transfer: {balance_transfer:?}"); - - let result = std::env::var("RAW_EVENT_RESULT").unwrap_or("0".to_string()); - let result = result.parse::().unwrap_or(0); - let result = result + 1; - std::env::set_var("RAW_EVENT_RESULT", result.to_string()); - - return Ok(1); - } - Ok(0) -} diff --git a/crates/clients/Cargo.toml b/crates/clients/Cargo.toml index b080eb982..eae5fe236 100644 --- a/crates/clients/Cargo.toml +++ b/crates/clients/Cargo.toml @@ -12,7 +12,6 @@ repository.workspace = true gadget-client-eigenlayer = { workspace = true, optional = true } gadget-client-evm = { workspace = true, optional = true } gadget-client-tangle = { workspace = true, optional = true } -gadget-client-networking = { workspace = true, optional = true } gadget-client-core = { workspace = true } gadget-std.workspace = true @@ -23,14 +22,13 @@ default = ["std"] std = [ "gadget-client-eigenlayer?/std", "gadget-client-evm?/std", - "gadget-client-networking?/std", "gadget-client-tangle?/std", "gadget-std/std", - "thiserror/std" + "thiserror/std", ] web = ["gadget-client-tangle?/web"] eigenlayer = ["dep:gadget-client-eigenlayer"] evm = ["dep:gadget-client-evm"] -networking = ["dep:gadget-client-networking"] -tangle = ["dep:gadget-client-tangle"] \ No newline at end of file +tangle = ["dep:gadget-client-tangle"] +networking = [] diff --git a/crates/clients/networking/Cargo.toml b/crates/clients/networking/Cargo.toml deleted file mode 100644 index 1b37a9963..000000000 --- a/crates/clients/networking/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "gadget-client-networking" -version = "0.1.0" 
-description = "Networking client for Tangle Blueprints" -authors.workspace = true -edition.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[dependencies] -gadget-config = { workspace = true, features = ["networking"] } -gadget-crypto = { workspace = true, features = ["k256"] } -gadget-logging = { workspace = true } -gadget-networking = { workspace = true, features = ["round-based-compat"] } -gadget-client-core = { workspace = true } -gadget-std = { workspace = true } -libp2p = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true, features = ["alloc"] } -thiserror = { workspace = true } - -[features] -default = ["std"] -std = [ - "gadget-config/std", - "gadget-crypto/std", - "gadget-logging/std", - "gadget-client-core/std", - "gadget-networking/std", - "gadget-std/std", - "serde/std", - "serde_json/std", -] \ No newline at end of file diff --git a/crates/clients/networking/src/error.rs b/crates/clients/networking/src/error.rs deleted file mode 100644 index 8d1e18031..000000000 --- a/crates/clients/networking/src/error.rs +++ /dev/null @@ -1,22 +0,0 @@ -use gadget_std::string::String; -use thiserror::Error; - -#[derive(Debug, Error)] -pub enum Error { - #[error("P2P error: {0}")] - P2p(String), - #[error("Transport error: {0}")] - Transport(String), - #[error("Protocol error: {0}")] - Protocol(String), - #[error("Configuration error: {0}")] - Configuration(String), -} - -impl From for gadget_client_core::error::Error { - fn from(value: Error) -> Self { - gadget_client_core::error::Error::Network(value.to_string()) - } -} - -pub type Result = gadget_std::result::Result; diff --git a/crates/clients/networking/src/lib.rs b/crates/clients/networking/src/lib.rs deleted file mode 100644 index 893f5304b..000000000 --- a/crates/clients/networking/src/lib.rs +++ /dev/null @@ -1,4 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] - -pub mod error; -pub mod p2p; diff --git a/crates/clients/networking/src/p2p.rs b/crates/clients/networking/src/p2p.rs deleted file mode 100644 index 266a1c00f..000000000 --- a/crates/clients/networking/src/p2p.rs +++ /dev/null @@ -1,117 +0,0 @@ -use crate::error::{Error, Result}; -use gadget_config::GadgetConfiguration; -use gadget_networking::gossip::GossipHandle; -use gadget_networking::round_based_compat::NetworkDeliveryWrapper; -use gadget_networking::setup::NetworkConfig; -use gadget_networking::{networking::NetworkMultiplexer, round_based}; -use gadget_networking::{GossipMsgKeyPair, GossipMsgPublicKey}; -use gadget_std::collections::BTreeMap; -use gadget_std::sync::Arc; -use round_based::PartyIndex; - -pub struct P2PClient { - name: String, - config: GadgetConfiguration, - target_port: u16, - gossip_msg_keypair: GossipMsgKeyPair, -} - -impl P2PClient { - pub fn new( - name: String, - config: GadgetConfiguration, - target_port: u16, - gossip_msg_keypair: GossipMsgKeyPair, - ) -> Self { - Self { - name, - config, - target_port, - gossip_msg_keypair, - } - } - - pub fn config(&self) -> &GadgetConfiguration { - &self.config - } - - /// Returns the network protocol identifier - pub fn network_protocol(&self, version: Option) -> String { - let name = self.name.to_lowercase(); - match version { - Some(v) => format!("/{}/{}", name, v), - None => format!("/{}/1.0.0", name), - } - } - - pub fn libp2p_identity(&self, ed25519_seed: Vec) -> Result { - let mut seed_bytes = ed25519_seed; - let keypair = libp2p::identity::Keypair::ed25519_from_bytes(&mut seed_bytes) - .map_err(|err| 
Error::Configuration(err.to_string()))?; - Ok(keypair) - } - - /// Returns a new `NetworkConfig` for the current environment. - pub fn libp2p_network_config>( - &self, - network_name: T, - ed25519_seed: Vec, - ) -> Result { - let network_identity = self.libp2p_identity(ed25519_seed)?; - let network_config = NetworkConfig::new_service_network( - network_identity, - self.gossip_msg_keypair.clone(), - self.config.bootnodes.clone(), - self.target_port, - network_name, - ); - - Ok(network_config) - } - - /// Starts the P2P network and returns the gossip handle - pub fn start_p2p_network>( - &self, - network_name: T, - ed25519_seed: Vec, - ) -> Result { - let network_config = self.libp2p_network_config(network_name, ed25519_seed)?; - match gadget_networking::setup::start_p2p_network(network_config) { - Ok(handle) => Ok(handle), - Err(err) => { - gadget_logging::error!("Failed to start network: {}", err.to_string()); - Err(Error::Protocol(format!("Failed to start network: {err}"))) - } - } - } - - /// Creates a network multiplexer backend - pub fn create_network_multiplexer>( - &self, - network_name: T, - ed25519_seed: Vec, - ) -> Result> { - let handle = self.start_p2p_network(network_name, ed25519_seed)?; - Ok(Arc::new(NetworkMultiplexer::new(handle))) - } - - /// Creates a network delivery wrapper - pub fn create_network_delivery_wrapper( - &self, - mux: Arc, - party_index: PartyIndex, - task_hash: [u8; 32], - parties: BTreeMap, - ) -> NetworkDeliveryWrapper - where - M: Clone - + Send - + Unpin - + 'static - + serde::Serialize - + serde::de::DeserializeOwned - + round_based::ProtocolMessage, - { - NetworkDeliveryWrapper::new(mux, party_index, task_hash, parties) - } -} diff --git a/crates/clients/tangle/src/client.rs b/crates/clients/tangle/src/client.rs index d790568fa..cac1a9f6f 100644 --- a/crates/clients/tangle/src/client.rs +++ b/crates/clients/tangle/src/client.rs @@ -45,8 +45,6 @@ pub struct TangleClient { services_client: TangleServicesClient, } -const KEY_ID: &str = "tangle-default"; - impl TangleClient { /// Create a new Tangle runtime client from an existing [`GadgetConfiguration`]. 
pub async fn new(config: GadgetConfiguration) -> std::result::Result { @@ -154,16 +152,19 @@ impl TangleClient { Error, > { let parties = self.get_operators().await?; - let my_id = self.keystore.get_public_key_local::(KEY_ID)?; + let my_id = self + .keystore + .first_local::() + .map_err(Error::Keystore)?; gadget_logging::trace!( - "Looking for {my_id:?} in parties: {:?}", + "Looking for {my_id} in parties: {:?}", parties.keys().collect::>() ); let index_of_my_id = parties .iter() - .position(|(_id, key)| key == &my_id.0) + .position(|(id, _key)| id.0 == my_id.0.to_raw()) .ok_or(Error::PartyNotFound)?; Ok((index_of_my_id, parties)) @@ -303,7 +304,7 @@ impl GadgetServicesClient for TangleClient { async fn operator_id( &self, ) -> std::result::Result { - Ok(self.keystore.get_public_key_local::(KEY_ID)?.0) + Ok(self.keystore.first_local::()?.0) } /// Retrieves the current blueprint ID from the configuration diff --git a/crates/config/src/context_config.rs b/crates/config/src/context_config.rs index d751eb7bc..c508dc0b8 100644 --- a/crates/config/src/context_config.rs +++ b/crates/config/src/context_config.rs @@ -37,6 +37,18 @@ pub enum GadgetCLICoreSettings { #[arg(long, env)] #[serde(default)] network_bind_port: Option, + #[cfg(feature = "networking")] + #[arg(long, env)] + #[serde(default)] + enable_mdns: bool, + #[cfg(feature = "networking")] + #[arg(long, env)] + #[serde(default)] + enable_kademlia: bool, + #[cfg(feature = "networking")] + #[arg(long, env)] + #[serde(default)] + target_peer_count: Option, #[arg(long, short = 'd', env)] keystore_uri: String, #[arg(long, value_enum, env)] @@ -218,6 +230,12 @@ impl Default for GadgetCLICoreSettings { bootnodes: None, #[cfg(feature = "networking")] network_bind_port: None, + #[cfg(feature = "networking")] + enable_mdns: false, + #[cfg(feature = "networking")] + enable_kademlia: false, + #[cfg(feature = "networking")] + target_peer_count: None, keystore_uri: String::new(), chain: SupportedChains::default(), verbose: 0, @@ -342,6 +360,11 @@ impl ContextConfig { #[cfg(feature = "tangle")] let service_id = tangle_settings.and_then(|s| s.service_id); + #[cfg(feature = "networking")] + let enable_mdns = cfg!(debug_assertions); + #[cfg(feature = "networking")] + let enable_kademlia = !cfg!(debug_assertions); + ContextConfig { gadget_core_settings: GadgetCLICoreSettings::Run { test_mode: false, @@ -350,6 +373,12 @@ impl ContextConfig { bootnodes: None, #[cfg(feature = "networking")] network_bind_port: None, + #[cfg(feature = "networking")] + enable_mdns, + #[cfg(feature = "networking")] + enable_kademlia, + #[cfg(feature = "networking")] + target_peer_count: None, keystore_uri, chain, verbose: 3, diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index e7840107d..2f9a5683c 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -69,18 +69,28 @@ pub enum Error { /// Missing `SymbioticContractAddresses` #[error("Missing SymbioticContractAddresses")] MissingSymbioticContractAddresses, + + #[cfg(feature = "networking")] + #[error(transparent)] + Networking(#[from] gadget_networking::error::Error), + #[error("Bad RPC Connection: {0}")] BadRpcConnection(String), #[error("Configuration error: {0}")] ConfigurationError(String), + + #[cfg(feature = "networking")] + #[error("Failed to parse Multiaddr: {0}")] + Multiaddr(#[from] libp2p::multiaddr::Error), } #[cfg(feature = "networking")] pub use networking_imports::*; #[cfg(feature = "networking")] mod networking_imports { - pub use 
gadget_networking::networking::NetworkMultiplexer; - pub use gadget_networking::setup::start_p2p_network; + // pub use gadget_networking::networking::NetworkMultiplexer; + // pub use gadget_networking::start_p2p_network; + pub use gadget_networking::NetworkConfig; pub use libp2p::Multiaddr; pub use std::sync::Arc; } @@ -111,23 +121,30 @@ pub struct GadgetConfiguration { pub protocol_settings: ProtocolSettings, /// Whether the gadget is in test mode pub test_mode: bool, + /// Whether to enable mDNS + #[cfg(feature = "networking")] + pub enable_mdns: bool, + /// Whether to enable Kademlia + #[cfg(feature = "networking")] + pub enable_kademlia: bool, + /// The target number of peers to connect to + #[cfg(feature = "networking")] + pub target_peer_count: u32, } impl GadgetConfiguration { #[cfg(feature = "networking")] pub fn libp2p_start_network( &self, - network_name: impl Into, - ) -> Result, Error> { - tracing::info!(target: "gadget", "AB0"); - let network_config = self - .libp2p_network_config(network_name) - .map_err(|err| Error::ConfigurationError(err.to_string()))?; + network_config: gadget_networking::NetworkConfig, + allowed_keys: gadget_std::collections::HashSet, + ) -> Result { + let networking_service = + gadget_networking::NetworkService::new(network_config, allowed_keys)?; + + let handle = networking_service.start(); - tracing::info!(target: "gadget", "AB1"); - start_p2p_network(network_config) - .map_err(|err| Error::ConfigurationError(err.to_string())) - .map(|net| Arc::new(NetworkMultiplexer::new(net))) + Ok(handle) } /// Returns a new `NetworkConfig` for the current environment. @@ -135,7 +152,7 @@ impl GadgetConfiguration { pub fn libp2p_network_config( &self, network_name: impl Into, - ) -> Result { + ) -> Result { use gadget_keystore::backends::Backend; use gadget_keystore::crypto::sp_core::SpEd25519 as LibP2PKeyType; use gadget_networking::key_types::Curve as GossipMsgKeyPair; @@ -159,13 +176,22 @@ impl GadgetConfiguration { .get_secret::(&ecdsa_pub_key) .map_err(|err| Error::ConfigurationError(err.to_string()))?; - let network_config = gadget_networking::setup::NetworkConfig::new_service_network( - network_identity, - ecdsa_pair, - self.bootnodes.clone(), - self.network_bind_port, + let listen_addr: Multiaddr = format!("/ip4/0.0.0.0/tcp/{}", self.network_bind_port) + .parse() + .expect("valid multiaddr; qed"); + + let network_name: String = network_name.into(); + let network_config = NetworkConfig { + instance_id: network_name.clone(), network_name, - ); + instance_key_pair: ecdsa_pair, + local_key: network_identity, + listen_addr, + target_peer_count: self.target_peer_count, + bootstrap_peers: self.bootnodes.clone(), + enable_mdns: self.enable_mdns, + enable_kademlia: self.enable_kademlia, + }; Ok(network_config) } @@ -191,6 +217,12 @@ fn load_inner(config: ContextConfig) -> Result { bootnodes, #[cfg(feature = "networking")] network_bind_port, + #[cfg(feature = "networking")] + enable_mdns, + #[cfg(feature = "networking")] + enable_kademlia, + #[cfg(feature = "networking")] + target_peer_count, keystore_uri, protocol, #[cfg(feature = "tangle")] @@ -310,6 +342,12 @@ fn load_inner(config: ContextConfig) -> Result { bootnodes: bootnodes.unwrap_or_default(), #[cfg(feature = "networking")] network_bind_port: network_bind_port.unwrap_or_default(), + #[cfg(feature = "networking")] + enable_mdns, + #[cfg(feature = "networking")] + enable_kademlia, + #[cfg(feature = "networking")] + target_peer_count: target_peer_count.unwrap_or(24), protocol, protocol_settings, }) diff --git 
a/crates/contexts/Cargo.toml b/crates/contexts/Cargo.toml index 11356861e..9c7e88b84 100644 --- a/crates/contexts/Cargo.toml +++ b/crates/contexts/Cargo.toml @@ -22,19 +22,11 @@ tangle-subxt = { workspace = true, optional = true } [features] default = ["std", "keystore"] -std = [ - "gadget-std/std", - "gadget-clients/std", - "gadget-keystore?/std", - "gadget-networking?/std", - "tangle-subxt?/std", -] -web = [ - "tangle-subxt?/web", -] +std = ["gadget-std/std", "gadget-clients/std", "gadget-keystore?/std", "gadget-networking?/std", "tangle-subxt?/std"] +web = ["tangle-subxt?/web"] evm = ["gadget-clients/evm"] eigenlayer = ["gadget-clients/eigenlayer"] -networking = ["gadget-clients/networking", "dep:gadget-networking", "dep:proc-macro2"] +networking = ["dep:gadget-networking", "dep:proc-macro2"] keystore = ["dep:gadget-config", "dep:gadget-keystore"] -tangle = ["dep:tangle-subxt", "gadget-clients/tangle"] \ No newline at end of file +tangle = ["dep:tangle-subxt", "gadget-clients/tangle"] diff --git a/crates/contexts/src/lib.rs b/crates/contexts/src/lib.rs index 6a4c038a7..f3de4e538 100644 --- a/crates/contexts/src/lib.rs +++ b/crates/contexts/src/lib.rs @@ -4,8 +4,6 @@ pub mod eigenlayer; pub mod instrumented_evm_client; #[cfg(feature = "keystore")] pub mod keystore; -#[cfg(feature = "networking")] -pub mod p2p; #[cfg(feature = "tangle")] pub mod services; #[cfg(feature = "tangle")] diff --git a/crates/contexts/src/p2p.rs b/crates/contexts/src/p2p.rs index 6461f9001..e69de29bb 100644 --- a/crates/contexts/src/p2p.rs +++ b/crates/contexts/src/p2p.rs @@ -1,14 +0,0 @@ -pub use gadget_clients::networking::p2p::P2PClient; -pub use gadget_networking::GossipMsgKeyPair; -pub use gadget_std::net::IpAddr; -pub use proc_macro2; - -/// `P2pContext` trait provides access to a peer to peer networking client. -pub trait P2pContext { - fn p2p_client( - &self, - name: gadget_std::string::String, - target_port: u16, - my_ecdsa_key: GossipMsgKeyPair, - ) -> P2PClient; -} diff --git a/crates/crypto/k256/src/lib.rs b/crates/crypto/k256/src/lib.rs index ed85d095f..5afe60da6 100644 --- a/crates/crypto/k256/src/lib.rs +++ b/crates/crypto/k256/src/lib.rs @@ -13,44 +13,11 @@ use gadget_std::string::{String, ToString}; use gadget_std::UniformRand; use k256::ecdsa::signature::SignerMut; use k256::ecdsa::{SigningKey, VerifyingKey}; -use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; /// ECDSA key type pub struct K256Ecdsa; -#[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize, Deserialize)] -pub struct K256VerifyingKey(pub VerifyingKey); - -impl From for VerifyingKey { - fn from(key: K256VerifyingKey) -> Self { - key.0 - } -} - -impl KeyEncoding for K256VerifyingKey { - fn to_bytes(&self) -> Vec { - self.0.to_sec1_bytes().to_vec() - } - - fn from_bytes(bytes: &[u8]) -> core::result::Result { - let vk = VerifyingKey::from_sec1_bytes(bytes) - .map_err(|e| serde::de::Error::custom(e.to_string()))?; - Ok(K256VerifyingKey(vk)) - } -} - -impl PartialOrd for K256VerifyingKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for K256VerifyingKey { - fn cmp(&self, other: &Self) -> gadget_std::cmp::Ordering { - self.0.to_sec1_bytes().cmp(&other.0.to_sec1_bytes()) - } -} - macro_rules! impl_serde_bytes { ($wrapper:ident, $inner:path) => { #[derive(Clone, PartialEq, Eq, Debug)] @@ -95,14 +62,42 @@ macro_rules! 
impl_serde_bytes { D: serde::Deserializer<'de>, { let bytes = Vec::::deserialize(deserializer)?; - let inner = <$inner>::from_slice(&bytes) + let ret = <$wrapper as KeyEncoding>::from_bytes(&bytes) .map_err(|e| serde::de::Error::custom(e.to_string()))?; - Ok($wrapper(inner)) + Ok(ret) + } + } + + impl gadget_std::fmt::Display for $wrapper { + fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { + write!(f, "{}", hex::encode(self.to_bytes())) } } }; } +impl_serde_bytes!(K256VerifyingKey, k256::ecdsa::VerifyingKey); + +impl K256VerifyingKey { + fn to_bytes_impl(&self) -> Vec { + self.0.to_sec1_bytes().to_vec() + } + + fn from_bytes_impl(bytes: &[u8]) -> Result { + let vk = VerifyingKey::from_sec1_bytes(bytes) + .map_err(|e| K256Error::InvalidSigner(e.to_string()))?; + Ok(K256VerifyingKey(vk)) + } +} + +impl Hash for K256VerifyingKey { + fn hash(&self, state: &mut H) { + state.write(self.to_bytes().as_slice()); + } +} + +impl Copy for K256VerifyingKey {} + impl_serde_bytes!(K256SigningKey, k256::ecdsa::SigningKey); impl K256SigningKey { diff --git a/crates/crypto/sp-core/src/lib.rs b/crates/crypto/sp-core/src/lib.rs index ce2cbd02c..ce4c16c70 100644 --- a/crates/crypto/sp-core/src/lib.rs +++ b/crates/crypto/sp-core/src/lib.rs @@ -77,6 +77,12 @@ macro_rules! impl_sp_core_pair_public { #[derive(Clone, serde::Serialize, serde::Deserialize)] pub struct [](pub <$pair_type as sp_core::Pair>::Public); + impl gadget_std::hash::Hash for [] { + fn hash(&self, state: &mut H) { + self.0.to_raw_vec().hash(state); + } + } + impl KeyEncoding for [] { fn to_bytes(&self) -> Vec { self.0.to_raw_vec() @@ -112,6 +118,12 @@ macro_rules! impl_sp_core_pair_public { write!(f, "{:?}", self.to_bytes()) } } + + impl gadget_std::fmt::Display for [] { + fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { + write!(f, "{}", hex::encode(self.to_bytes())) + } + } } }; } @@ -120,7 +132,7 @@ macro_rules! impl_sp_core_pair_public { macro_rules! impl_sp_core_signature { ($key_type:ident, $pair_type:ty) => { paste::paste! { - #[derive(Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] + #[derive(Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct [](pub <$pair_type as sp_core::Pair>::Signature); impl PartialOrd for [] { @@ -140,6 +152,12 @@ macro_rules! impl_sp_core_signature { write!(f, "{:?}", self.0.0) } } + + impl gadget_std::fmt::Display for [] { + fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { + write!(f, "{}", hex::encode(self.0.0)) + } + } } }; } diff --git a/crates/macros/blueprint-proc-macro/src/job/mod.rs b/crates/macros/blueprint-proc-macro/src/job/mod.rs index 6c013fcab..6d0fb90af 100644 --- a/crates/macros/blueprint-proc-macro/src/job/mod.rs +++ b/crates/macros/blueprint-proc-macro/src/job/mod.rs @@ -422,8 +422,15 @@ pub(crate) fn generate_event_workflow_tokenstream( ctx_pos_in_ordered_inputs, )?, ListenerType::Custom => { - let job_processor_call = quote! { - let res = #fn_name_ident(context, param0) #asyncness; + let has_job_params = !params.is_empty(); + let job_processor_call = if has_job_params { + quote! { + let res = #fn_name_ident(context, param0) #asyncness; + } + } else { + quote! 
{ + let res = #fn_name_ident(context) #asyncness; + } }; let is_result = return_type.is_result_type(); let process_result = if is_result { diff --git a/crates/macros/blueprint-proc-macro/src/shared.rs b/crates/macros/blueprint-proc-macro/src/shared.rs index 7eb8a86e0..0300e5871 100644 --- a/crates/macros/blueprint-proc-macro/src/shared.rs +++ b/crates/macros/blueprint-proc-macro/src/shared.rs @@ -237,7 +237,7 @@ pub(crate) trait MacroExt { ResultsKind::Types(types) => { let xs = types .iter() - .map(|ty| type_to_field_type(ty)) + .map(type_to_field_type) .collect::>>()?; Ok(xs) } diff --git a/crates/macros/context-derive/src/lib.rs b/crates/macros/context-derive/src/lib.rs index 620e7bdbc..75dd22a8d 100644 --- a/crates/macros/context-derive/src/lib.rs +++ b/crates/macros/context-derive/src/lib.rs @@ -20,9 +20,6 @@ mod eigenlayer; mod evm; /// Keystore context extension implementation. mod keystore; -/// P2P context extension implementation. -#[cfg(feature = "networking")] -mod p2p; /// Tangle context extensions. #[cfg(feature = "tangle")] mod tangle; @@ -116,18 +113,3 @@ pub fn derive_eigenlayer_context(input: TokenStream) -> TokenStream { Err(err) => TokenStream::from(err.to_compile_error()), } } - -/// Derive macro for generating Context Extensions trait implementation for `P2pContext`. -#[proc_macro_derive(P2pContext, attributes(config))] -#[cfg(feature = "networking")] -pub fn derive_p2p_context(input: TokenStream) -> TokenStream { - let input = syn::parse_macro_input!(input as syn::DeriveInput); - let result = - cfg::find_config_field(&input.ident, &input.data, CONFIG_TAG_NAME, CONFIG_TAG_TYPE) - .map(|config_field| p2p::generate_context_impl(input, config_field)); - - match result { - Ok(expanded) => TokenStream::from(expanded), - Err(err) => TokenStream::from(err.to_compile_error()), - } -} diff --git a/crates/macros/context-derive/src/p2p.rs b/crates/macros/context-derive/src/p2p.rs deleted file mode 100644 index c2fb1756f..000000000 --- a/crates/macros/context-derive/src/p2p.rs +++ /dev/null @@ -1,41 +0,0 @@ -use quote::quote; -use syn::DeriveInput; - -use crate::cfg::FieldInfo; - -/// Generate the `MPCContext` implementation for the given struct. -#[allow(clippy::too_many_lines)] -pub fn generate_context_impl( - DeriveInput { - ident: name, - generics, - .. - }: DeriveInput, - config_field: FieldInfo, -) -> proc_macro2::TokenStream { - let field_access = match config_field { - FieldInfo::Named(ident) => quote! { self.#ident }, - FieldInfo::Unnamed(index) => quote! { self.#index }, - }; - - let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - - quote! 
{ - #[::blueprint_sdk::macros::ext::async_trait::async_trait] - impl #impl_generics ::blueprint_sdk::macros::ext::contexts::p2p::P2pContext for #name #ty_generics #where_clause { - fn p2p_client( - &self, - name: ::blueprint_sdk::macros::ext::std::string::String, - target_port: u16, - my_ecdsa_key: ::blueprint_sdk::macros::ext::contexts::p2p::GossipMsgKeyPair, - ) -> ::blueprint_sdk::macros::ext::contexts::p2p::P2PClient { - ::blueprint_sdk::macros::ext::contexts::p2p::P2PClient::new( - name, - #field_access.clone(), - target_port, - my_ecdsa_key.clone() - ) - } - } - } -} diff --git a/crates/macros/context-derive/tests/ui/basic.rs b/crates/macros/context-derive/tests/ui/basic.rs index 903a1c843..c12138446 100644 --- a/crates/macros/context-derive/tests/ui/basic.rs +++ b/crates/macros/context-derive/tests/ui/basic.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use blueprint_sdk::config::GadgetConfiguration; use blueprint_sdk::contexts::instrumented_evm_client::EvmInstrumentedClientContext as _; use blueprint_sdk::contexts::keystore::KeystoreContext as _; -use blueprint_sdk::contexts::p2p::P2pContext as _; use blueprint_sdk::contexts::services::ServicesContext as _; use blueprint_sdk::contexts::tangle::TangleClientContext as _; use blueprint_sdk::macros::ext::clients::GadgetServicesClient as _; @@ -14,12 +13,12 @@ use blueprint_sdk::std::collections::BTreeMap; use blueprint_sdk::std::sync::Arc; use blueprint_sdk::stores::local_database::LocalDatabase; use gadget_context_derive::{ - EVMProviderContext, KeystoreContext, P2pContext, ServicesContext, TangleClientContext, + EVMProviderContext, KeystoreContext, ServicesContext, TangleClientContext, }; use round_based::ProtocolMessage as RoundBasedProtocolMessage; use serde::{Deserialize, Serialize}; -#[derive(KeystoreContext, EVMProviderContext, TangleClientContext, ServicesContext, P2pContext)] +#[derive(KeystoreContext, EVMProviderContext, TangleClientContext, ServicesContext)] #[allow(dead_code)] struct MyContext { foo: String, @@ -41,7 +40,7 @@ fn main() { }; // Test existing context functions - let keystore = ctx.keystore(); + let _keystore = ctx.keystore(); let _evm_provider = ctx.evm_client(); let tangle_client = ctx.tangle_client().await.unwrap(); let _services_client = ctx.services_client().await; @@ -50,48 +49,12 @@ fn main() { .current_service_operators([0; 32], 0) .await .unwrap(); - let pub_key = keystore.generate::(None).unwrap(); - let pair = keystore.get_secret::(&pub_key).unwrap(); - let p2p_client = - ctx.p2p_client(String::from("Foo"), 1337, GossipMsgKeyPair(pair.0.clone())); - - // Test MPC context utility functions - let _config = p2p_client.config(); - let _protocol = p2p_client.network_protocol(None); - - // Test MPC context functions - - let mux = Arc::new(NetworkMultiplexer::new(StubNetwork)); - let party_index = 0; - let task_hash = [0u8; 32]; - let mut parties = BTreeMap::::new(); - parties.insert(0, pair.public()); - - // Test network delivery wrapper creation - let _network_wrapper = p2p_client.create_network_delivery_wrapper::( - mux.clone(), - party_index, - task_hash, - parties.clone(), - ); - - // TODO: Test party index retrieval - // let _party_idx = ctx.get_party_index().await; - - // TODO: Test participants retrieval - // let _participants = ctx.get_participants(&tangle_client).await; // Test blueprint ID retrieval let _blueprint_id = tangle_client.blueprint_id(); // Test party index and operators retrieval let _party_idx_ops = tangle_client.get_party_index_and_operators().await; - - // TODO: Test service 
operators ECDSA keys retrieval - // let _operator_keys = ctx.current_service_operators_ecdsa_keys().await; - - // TODO: Test current call ID retrieval - // let _call_id = tangle_client.current_call_id().await; }; drop(body); diff --git a/crates/networking/Cargo.toml b/crates/networking/Cargo.toml index 528f24f9c..46917174c 100644 --- a/crates/networking/Cargo.toml +++ b/crates/networking/Cargo.toml @@ -15,9 +15,11 @@ gadget-std = { workspace = true } # Core dependencies auto_impl = { workspace = true } +blake3 = { workspace = true } dashmap = { workspace = true } libp2p = { workspace = true } tokio = { workspace = true, features = ["macros"] } +tokio-stream = { workspace = true, features = ["time"] } futures = { workspace = true } tracing = { workspace = true } bincode = { workspace = true } @@ -29,14 +31,14 @@ hex = { workspace = true } itertools = { workspace = true, features = ["use_alloc"] } parking_lot = { workspace = true } thiserror = { workspace = true } +anyhow = { workspace = true } +crossbeam-channel = { workspace = true } # Crypto dependencies gadget-crypto = { workspace = true, features = ["k256", "hashing"] } +gadget-crypto-core = { workspace = true, features = ["k256"] } k256 = { workspace = true } -# Round-based protocol support -round-based = { workspace = true, optional = true } - [target.'cfg(not(target_family = "wasm"))'.dependencies.libp2p] workspace = true features = [ @@ -57,11 +59,17 @@ features = [ "ping", "dns", "autonat", + "upnp", + "serde", ] [dev-dependencies] +gadget-networking = { workspace = true, features = ["sp-core-ecdsa"] } +gadget-crypto = { workspace = true, features = ["sp-core"] } +gadget-crypto-core = { workspace = true, features = ["tangle"] } tracing-subscriber = { workspace = true } lazy_static = { workspace = true } +fastrand = "2.0" [features] default = ["std"] @@ -76,8 +84,6 @@ std = [ "serde_json/std", ] -round-based-compat = ["dep:round-based"] - # Only one of these features should be enabled at a time. # If none are enabled, k256 ECDSA will be used by default. sp-core-ecdsa = ["gadget-crypto/sp-core"] diff --git a/crates/networking/README.md b/crates/networking/README.md new file mode 100644 index 000000000..88136d528 --- /dev/null +++ b/crates/networking/README.md @@ -0,0 +1,178 @@ +# Networking Protocol Documentation + +This document outlines the key protocols used in the networking layer. + +## Handshake Protocol + +The handshake protocol ensures mutual authentication between peers before allowing protocol messages. + +```mermaid +sequenceDiagram + participant A as Peer A + participant B as Peer B + + Note over A,B: Initial TCP/QUIC Connection Established + + Note over A: Create handshake message with:
1. A's peer ID<br/>2. Current timestamp + Note over A: Sign(A_id | B_id | timestamp) + A->>+B: HandshakeRequest { + public_key: A_pub, + signature: sign(msg), + msg: HandshakeMessage { + sender: A_id, + timestamp: now + } + } + + Note over B: 1. Verify timestamp is fresh<br/>2. Verify A_pub derives to A_id<br/>3. Verify signature<br/>4. Store A's public key + + Note over B: Create handshake message with:<br/>1. B's peer ID<br/>2. Current timestamp + Note over B: Sign(B_id | A_id | timestamp) + B-->>-A: HandshakeResponse { + public_key: B_pub, + signature: sign(msg), + msg: HandshakeMessage { + sender: B_id, + timestamp: now + } + } + + Note over A: 1. Verify timestamp is fresh<br/>2. Verify B_pub derives to B_id<br/>3. Verify signature
4. Store B's public key + + Note over A,B: ✓ Handshake Complete + Note over A,B: ✓ Protocol Messages Allowed +``` + +### Handshake States + +```mermaid +stateDiagram-v2 + direction LR + [*] --> Connected: New Connection + + Connected --> OutboundPending: Send Handshake + Connected --> InboundPending: Receive Handshake + + OutboundPending --> Verifying: Valid Response + OutboundPending --> Failed: Invalid/Timeout + + InboundPending --> Verifying: Valid Request + InboundPending --> Failed: Invalid/Timeout + + Verifying --> Verified: All Checks Pass + Verifying --> Failed: Checks Fail + + Verified --> [*]: Connection Closed + Failed --> [*]: Connection Closed + + note right of Connected + Initial TCP/QUIC connection established + end note + + note right of Verifying + Checks: + 1. Timestamp fresh + 2. PubKey matches PeerId + 3. Signature valid + 4. Key whitelisted + end note + + note right of Verified + Both peers authenticated + Protocol messages allowed + end note +``` + +## Protocol Message Exchange + +After handshake completion, peers can exchange protocol messages through direct P2P or broadcast channels. + +```mermaid +sequenceDiagram + participant A as Peer A (Verified) + participant B as Peer B (Verified) + + Note over A,B: ✓ Handshake Completed + + A->>+B: InstanceMessageRequest { + protocol: String, + payload: Vec, + metadata: Option> + } + + alt Success Case + B-->>-A: InstanceMessageResponse::Success { + data: Option> + } + else Protocol Response + B-->>-A: InstanceMessageResponse::Protocol { + data: Vec + } + else Error Case + B-->>-A: InstanceMessageResponse::Error { + code: u16, + message: String + } + end +``` + +### Message Flow States + +```mermaid +stateDiagram-v2 + direction LR + [*] --> Handshaked: Peers Verified + + Handshaked --> RequestPending: Send Request + RequestPending --> Processing: Request Received + + Processing --> ResponseSent: Success/Protocol + Processing --> ErrorSent: Error + + ResponseSent --> Handshaked: Complete + ErrorSent --> Handshaked: Complete + + Handshaked --> [*]: Connection Closed + + note right of Handshaked + Peers authenticated + Ready for messages + end note + + note right of Processing + Validating request + Processing payload + end note +``` + +## Protocol Details + +### Handshake Protocol + +- Initiated on first connection +- Mutual authentication using public key cryptography +- Signatures verify peer identity and ownership +- Timestamps prevent replay attacks +- Timeouts after 30 seconds +- Handles concurrent handshakes gracefully + +### Protocol Message Types + +- Direct P2P messages: + - Targeted to specific peer + - Requires peer verification + - Guaranteed delivery attempt +- Broadcast messages: + - Sent to all peers + - Uses gossipsub protocol + - Best-effort delivery + +### Security Features + +- Peer verification before message acceptance +- Public key to peer ID verification +- Timestamp-based replay protection +- Signature verification for handshakes +- Banned peer tracking +- Connection limits +- Protocol version validation diff --git a/crates/networking/extensions/round-based/Cargo.toml b/crates/networking/extensions/round-based/Cargo.toml new file mode 100644 index 000000000..006905b4d --- /dev/null +++ b/crates/networking/extensions/round-based/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "gadget-networking-round-based-extension" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +gadget-networking = 
{ workspace = true } +round-based = { workspace = true } +tokio = { workspace = true } +futures = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tracing = { workspace = true } +dashmap = { workspace = true } +crossbeam = { workspace = true } +crossbeam-channel = { workspace = true } +thiserror = { workspace = true } + +[dev-dependencies] +round-based = { workspace = true, features = ["derive", "sim", "sim-async"] } +libp2p = { workspace = true, features = [ + "tokio", + "gossipsub", + "mdns", + "noise", + "macros", + "yamux", + "tcp", + "quic", + "request-response", + "cbor", + "identify", + "kad", + "dcutr", + "relay", + "ping", + "dns", + "autonat", + "upnp", +] } +gadget-crypto = { workspace = true, features = ["sp-core"] } +gadget-crypto-core = { workspace = true, features = ["tangle"] } + +tokio = { workspace = true, features = ["full"] } +tracing-subscriber = { workspace = true } +sha2 = { workspace = true } +rand_dev = "0.1" +rand = { workspace = true } +hex = { workspace = true } +# We don't use it directly, but we need to enable `serde` feature +generic-array = { version = "0.14", features = ["serde"] } + +[lints] +workspace = true diff --git a/crates/networking/extensions/round-based/src/lib.rs b/crates/networking/extensions/round-based/src/lib.rs new file mode 100644 index 000000000..75159769a --- /dev/null +++ b/crates/networking/extensions/round-based/src/lib.rs @@ -0,0 +1,254 @@ +#[cfg(test)] +mod tests; + +use crossbeam_channel::{self, Receiver, Sender}; +use dashmap::DashMap; +use futures::Future; +use futures::{Sink, Stream}; +use gadget_networking::{ + key_types::InstanceMsgPublicKey, + service_handle::NetworkServiceHandle, + types::{ParticipantInfo, ProtocolMessage}, +}; +use round_based::{ + Delivery, Incoming, MessageDestination, MessageType, MsgId, Outgoing, PartyIndex, +}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + collections::HashMap, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; + +/// Wrapper to adapt NetworkServiceHandle to round-based protocols +pub struct RoundBasedNetworkAdapter { + /// The underlying network handle + handle: NetworkServiceHandle, + /// Current party's index + party_index: PartyIndex, + /// Mapping of party indices to their public keys + parties: Arc>, + /// Counter for message IDs + next_msg_id: Arc, + /// Protocol identifier + protocol_id: String, + _phantom: std::marker::PhantomData, +} + +impl RoundBasedNetworkAdapter +where + M: Clone + Send + Sync + Unpin + 'static, + M: Serialize + DeserializeOwned, + M: round_based::ProtocolMessage, +{ + pub fn new( + handle: NetworkServiceHandle, + party_index: PartyIndex, + parties: HashMap, + protocol_id: impl Into, + ) -> Self { + Self { + handle, + party_index, + parties: Arc::new(DashMap::from_iter(parties)), + next_msg_id: Arc::new(AtomicU64::new(0)), + protocol_id: protocol_id.into(), + _phantom: std::marker::PhantomData, + } + } +} + +impl Delivery for RoundBasedNetworkAdapter +where + M: Clone + Send + Sync + Unpin + 'static, + M: Serialize + DeserializeOwned, + M: round_based::ProtocolMessage, +{ + type Send = RoundBasedSender; + type Receive = RoundBasedReceiver; + type SendError = NetworkError; + type ReceiveError = NetworkError; + + fn split(self) -> (Self::Receive, Self::Send) { + let RoundBasedNetworkAdapter { + handle, + party_index, + parties, + next_msg_id, + protocol_id, + .. 
+ } = self; + + let sender = RoundBasedSender { + handle: handle.clone(), + party_index, + parties: parties.clone(), + next_msg_id: next_msg_id.clone(), + protocol_id: protocol_id.clone(), + _phantom: std::marker::PhantomData, + }; + + let receiver = RoundBasedReceiver::new(handle, party_index); + + (receiver, sender) + } +} + +pub struct RoundBasedSender { + handle: NetworkServiceHandle, + party_index: PartyIndex, + parties: Arc>, + next_msg_id: Arc, + protocol_id: String, + _phantom: std::marker::PhantomData, +} + +impl Sink> for RoundBasedSender +where + M: Serialize + round_based::ProtocolMessage + Clone + Unpin, +{ + type Error = NetworkError; + + fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, outgoing: Outgoing) -> Result<(), Self::Error> { + let this = self.get_mut(); + let msg_id = this.next_msg_id.fetch_add(1, Ordering::Relaxed); + let round = outgoing.msg.round(); + + tracing::trace!( + i = %this.party_index, + recipient = ?outgoing.recipient, + %round, + %msg_id, + protocol_id = %this.protocol_id, + "Sending message", + ); + + let (recipient, recipient_key) = match outgoing.recipient { + MessageDestination::AllParties => (None, None), + MessageDestination::OneParty(p) => { + let key = this.parties.get(&p).map(|k| k.clone()); + (Some(p), key) + } + }; + + let protocol_message = ProtocolMessage { + protocol: format!("{}/{}", this.protocol_id, round), + routing: gadget_networking::types::MessageRouting { + message_id: msg_id, + round_id: round, + sender: ParticipantInfo { + id: gadget_networking::types::ParticipantId(this.party_index), + public_key: this.parties.get(&this.party_index).map(|k| k.clone()), + }, + recipient: recipient.map(|p| ParticipantInfo { + id: gadget_networking::types::ParticipantId(p), + public_key: recipient_key, + }), + }, + payload: serde_json::to_vec(&outgoing.msg) + .map_err(|e| NetworkError::Serialization(e))?, + }; + + tracing::trace!( + %round, + %msg_id, + protocol_id = %this.protocol_id, + "Sending message to network", + ); + + this.handle + .send(protocol_message.routing, protocol_message.payload) + .map_err(|e| NetworkError::Send(e)) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +pub struct RoundBasedReceiver { + handle: NetworkServiceHandle, + party_index: PartyIndex, + _phantom: std::marker::PhantomData, +} + +impl RoundBasedReceiver { + fn new(handle: NetworkServiceHandle, party_index: PartyIndex) -> Self { + Self { + handle, + party_index, + _phantom: std::marker::PhantomData, + } + } +} + +impl Stream for RoundBasedReceiver +where + M: DeserializeOwned + round_based::ProtocolMessage + Unpin, +{ + type Item = Result, NetworkError>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // Get a mutable reference to self + let this = self.get_mut(); + + let next_protocol_message = this.handle.next_protocol_message(); + match next_protocol_message { + Some(protocol_message) => { + let msg_type = if protocol_message.routing.recipient.is_some() { + MessageType::P2P + } else { + MessageType::Broadcast + }; + + let sender = protocol_message.routing.sender.id.0; + let id = protocol_message.routing.message_id; + + tracing::trace!( + i = %this.party_index, + sender = ?sender, + %id, + protocol_id = %protocol_message.protocol, + ?msg_type, + size = %protocol_message.payload.len(), + 
"Received message", + ); + match serde_json::from_slice(&protocol_message.payload) { + Ok(msg) => Poll::Ready(Some(Ok(Incoming { + msg, + sender, + id, + msg_type, + }))), + Err(e) => Poll::Ready(Some(Err(NetworkError::Serialization(e)))), + } + } + None => { + //tracing::trace!(i = %this.party_index, "No message received; the waker will wake us up when there is a new message"); + // In this case, tell the waker to wake us up when there is a new message + cx.waker().wake_by_ref(); + Poll::Pending + } + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum NetworkError { + #[error("Failed to serialize message: {0}")] + Serialization(#[from] serde_json::Error), + #[error("Network error: {0}")] + Send(String), +} diff --git a/crates/networking/extensions/round-based/src/tests.rs b/crates/networking/extensions/round-based/src/tests.rs new file mode 100644 index 000000000..68c5d8210 --- /dev/null +++ b/crates/networking/extensions/round-based/src/tests.rs @@ -0,0 +1,61 @@ +use gadget_networking::{KeyType, NetworkConfig, NetworkService}; +use libp2p::Multiaddr; +use round_based::ProtocolMessage; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +#[derive(Debug, Serialize, Deserialize, Clone, ProtocolMessage)] +enum Msg { + Round1(Round1Msg), + Round2(Round2Msg), + Round3(Round3Msg), +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +struct Round1Msg { + pub power: u16, + pub hitpoints: u16, + pub armor: u16, + pub name: String, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +struct Round2Msg { + pub x: u16, + pub y: u16, + pub z: u16, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +struct Round3Msg { + rotation: u16, + velocity: (u16, u16, u16), +} + +const TOPIC: &str = "/gadget/test/1.0.0"; + +fn node() -> NetworkService { + let local_key = libp2p::identity::Keypair::generate_ed25519(); + let listen_addr: Multiaddr = "/ip4/127.0.0.1/tcp/0".parse().unwrap(); + + let instance_key_pair = gadget_networking::Curve::generate_with_seed(None).unwrap(); + + let config = NetworkConfig { + network_name: TOPIC.to_string(), + instance_id: String::from("0"), + instance_key_pair, + local_key, + listen_addr, + target_peer_count: 0, + bootstrap_peers: vec![], + enable_mdns: true, + enable_kademlia: true, + }; + + NetworkService::new(config, HashSet::default()).unwrap() +} + +#[tokio::test] +async fn round_based() { + let node = node(); +} diff --git a/crates/networking/extensions/round-based/tests/common/mod.rs b/crates/networking/extensions/round-based/tests/common/mod.rs new file mode 100644 index 000000000..a33372c52 --- /dev/null +++ b/crates/networking/extensions/round-based/tests/common/mod.rs @@ -0,0 +1,242 @@ +use gadget_crypto::KeyType; +use gadget_networking::service::NetworkMessage; +use gadget_networking::{ + key_types::{Curve, InstanceMsgKeyPair, InstanceMsgPublicKey}, + service_handle::NetworkServiceHandle, + NetworkConfig, NetworkService, +}; +use libp2p::{ + identity::{self, Keypair}, + Multiaddr, PeerId, +}; +use std::string::ToString; +use std::{collections::HashSet, time::Duration}; +use tokio::time::timeout; +use tracing::info; + +pub fn init_tracing() { + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .with_target(true) + .with_thread_ids(false) + .with_file(true) + .with_line_number(true) + .try_init(); +} + +/// Test node configuration for network tests +pub struct TestNode { + pub service: Option, + pub peer_id: PeerId, + pub listen_addr: Option, + pub instance_key_pair: 
InstanceMsgKeyPair, + pub local_key: Keypair, +} + +impl TestNode { + /// Create a new test node with auto-generated keys + pub async fn new( + network_name: &str, + instance_id: &str, + allowed_keys: HashSet, + bootstrap_peers: Vec, + ) -> Self { + Self::new_with_keys( + network_name, + instance_id, + allowed_keys, + bootstrap_peers, + None, + None, + ) + .await + } + + /// Create a new test node with specified keys + pub async fn new_with_keys( + network_name: &str, + instance_id: &str, + allowed_keys: HashSet, + bootstrap_peers: Vec, + instance_key_pair: Option, + local_key: Option, + ) -> Self { + let local_key = local_key.unwrap_or_else(|| identity::Keypair::generate_ed25519()); + let peer_id = local_key.public().to_peer_id(); + + // Bind to all interfaces instead of just localhost + let listen_addr: Multiaddr = "/ip4/0.0.0.0/tcp/0".parse().unwrap(); + info!("Creating test node {peer_id} with TCP address: {listen_addr}"); + + let instance_key_pair = + instance_key_pair.unwrap_or_else(|| Curve::generate_with_seed(None).unwrap()); + + let config = NetworkConfig { + network_name: network_name.to_string(), + instance_id: instance_id.to_string(), + instance_key_pair: instance_key_pair.clone(), + local_key: local_key.clone(), + listen_addr: listen_addr.clone(), + target_peer_count: 10, + bootstrap_peers, + enable_mdns: true, + enable_kademlia: true, + }; + + let service = + NetworkService::new(config, allowed_keys).expect("Failed to create network service"); + + Self { + service: Some(service), + peer_id, + listen_addr: None, + instance_key_pair, + local_key, + } + } + + /// Start the node and wait for it to be fully initialized + pub async fn start(&mut self) -> Result { + // Take ownership of the service + let service = self.service.take().ok_or("Service already started")?; + let handle = service.start(); + + // Wait for the node to be fully initialized + let timeout_duration = Duration::from_secs(10); // Increased timeout + match timeout(timeout_duration, async { + // First wait for the listening address + while self.listen_addr.is_none() { + if let Some(addr) = handle.get_listen_addr() { + info!("Node {} listening on {}", self.peer_id, addr); + self.listen_addr = Some(addr.clone()); + + // Extract port from multiaddr + let addr_str = addr.to_string(); + let port = addr_str.split("/").nth(4).unwrap_or("0").to_string(); + + // Try localhost first + let localhost_addr = format!("127.0.0.1:{}", port); + match tokio::net::TcpStream::connect(&localhost_addr).await { + Ok(_) => { + info!("Successfully verified localhost port for {}", self.peer_id); + break; + } + Err(e) => { + info!("Localhost port not ready for {}: {}", self.peer_id, e); + // Try external IP + let external_addr = format!("10.0.1.142:{}", port); + match tokio::net::TcpStream::connect(&external_addr).await { + Ok(_) => { + info!( + "Successfully verified external port for {}", + self.peer_id + ); + break; + } + Err(e) => { + info!("External port not ready for {}: {}", self.peer_id, e); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + } + } + } + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Give the node a moment to initialize protocols + tokio::time::sleep(Duration::from_millis(500)).await; + + Ok::<(), &'static str>(()) + }) + .await + { + Ok(Ok(_)) => { + info!("Node {} fully initialized", self.peer_id); + Ok(handle) + } + Ok(Err(e)) => Err(e), + Err(_) => Err("Timeout waiting for node to initialize"), + } + } + + /// Get the actual listening address + pub fn 
get_listen_addr(&self) -> Option { + self.listen_addr.clone() + } +} + +/// Wait for a condition with timeout +pub async fn wait_for_condition(timeout: Duration, mut condition: F) -> Result<(), &'static str> +where + F: FnMut() -> bool, +{ + let start = std::time::Instant::now(); + while !condition() { + if start.elapsed() > timeout { + return Err("Timeout waiting for condition"); + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + Ok(()) +} + +/// Wait for peers to discover each other +pub async fn wait_for_peer_discovery( + handles: &[&NetworkServiceHandle], + timeout: Duration, +) -> Result<(), &'static str> { + info!("Waiting for peer discovery..."); + + wait_for_condition(timeout, || { + for (i, handle1) in handles.iter().enumerate() { + for (j, handle2) in handles.iter().enumerate() { + if i != j + && !handle1 + .peers() + .iter() + .any(|id| *id == handle2.local_peer_id) + { + return false; + } + } + } + true + }) + .await +} + +/// Wait for peer info to be updated +pub async fn wait_for_peer_info( + handle1: &NetworkServiceHandle, + handle2: &NetworkServiceHandle, + timeout: Duration, +) { + info!("Waiting for identify info..."); + + match tokio::time::timeout(timeout, async { + loop { + let peer_info1 = handle1.peer_info(&handle2.local_peer_id); + let peer_info2 = handle2.peer_info(&handle1.local_peer_id); + + if let Some(peer_info) = peer_info1 { + if peer_info.identify_info.is_some() { + // Also verify reverse direction + if let Some(peer_info) = peer_info2 { + if peer_info.identify_info.is_some() { + info!("Identify info populated in both directions"); + break; + } + } + } + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + { + Ok(_) => info!("Peer info updated successfully in both directions"), + Err(_) => panic!("Peer info update timed out"), + } +} diff --git a/crates/networking/extensions/round-based/tests/rand_protocol.rs b/crates/networking/extensions/round-based/tests/rand_protocol.rs new file mode 100644 index 000000000..0259a0bc2 --- /dev/null +++ b/crates/networking/extensions/round-based/tests/rand_protocol.rs @@ -0,0 +1,299 @@ +//! Simple protocol in which parties cooperate to generate randomness + +mod common; + +use serde::{Deserialize, Serialize}; +use sha2::{digest::Output, Digest, Sha256}; + +use round_based::rounds_router::{ + simple_store::{RoundInput, RoundInputError}, + CompleteRoundError, RoundsRouter, +}; +use round_based::{Delivery, Mpc, MpcParty, MsgId, Outgoing, PartyIndex, ProtocolMessage, SinkExt}; + +/// Protocol message +#[derive(Clone, Debug, PartialEq, ProtocolMessage, Serialize, Deserialize)] +pub enum Msg { + /// Round 1 + CommitMsg(CommitMsg), + /// Round 2 + DecommitMsg(DecommitMsg), +} + +/// Message from round 1 +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct CommitMsg { + /// Party commitment + pub commitment: Output, +} + +/// Message from round 2 +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct DecommitMsg { + /// Randomness generated by party + pub randomness: [u8; 32], +} + +/// Carries out the randomness generation protocol +#[tracing::instrument(skip(party, rng))] +pub async fn protocol_of_random_generation( + party: M, + i: PartyIndex, + n: u16, + mut rng: R, +) -> Result<[u8; 32], Error> +where + M: Mpc, + R: rand::RngCore, +{ + let MpcParty { delivery, .. 
} = party.into_party(); + let (incoming, mut outgoing) = delivery.split(); + + // Define rounds + let mut rounds = RoundsRouter::::builder(); + let round1 = rounds.add_round(RoundInput::::broadcast(i, n)); + let round2 = rounds.add_round(RoundInput::::broadcast(i, n)); + let mut rounds = rounds.listen(incoming); + + // --- The Protocol --- + + // 1. Generate local randomness + let mut local_randomness = [0u8; 32]; + rng.fill_bytes(&mut local_randomness); + + tracing::debug!(local_randomness = %hex::encode(&local_randomness), "Generated local randomness"); + + // 2. Commit local randomness (broadcast m=sha256(randomness)) + let commitment = Sha256::digest(local_randomness); + tracing::debug!(commitment = %hex::encode(&commitment), "Committed local randomness"); + outgoing + .send(Outgoing::broadcast(Msg::CommitMsg(CommitMsg { + commitment, + }))) + .await + .map_err(Error::Round1Send)?; + + tracing::debug!("Sent commitment and waiting for others to send theirs"); + + // 3. Receive committed randomness from other parties + let commitments = rounds + .complete(round1) + .await + .map_err(Error::Round1Receive)?; + + tracing::debug!("Received commitments from all parties"); + + // 4. Open local randomness + tracing::debug!("Opening local randomness"); + outgoing + .send(Outgoing::broadcast(Msg::DecommitMsg(DecommitMsg { + randomness: local_randomness, + }))) + .await + .map_err(Error::Round2Send)?; + + tracing::debug!("Sent decommitment and waiting for others to send theirs"); + + // 5. Receive opened local randomness from other parties, verify them, and output protocol randomness + let randomness = rounds + .complete(round2) + .await + .map_err(Error::Round2Receive)?; + + tracing::debug!("Received decommitments from all parties"); + + let mut guilty_parties = vec![]; + let mut output = local_randomness; + for ((party_i, com_msg_id, commit), (_, decom_msg_id, decommit)) in commitments + .into_iter_indexed() + .zip(randomness.into_iter_indexed()) + { + let commitment_expected = Sha256::digest(decommit.randomness); + if commit.commitment != commitment_expected { + guilty_parties.push(Blame { + guilty_party: party_i, + commitment_msg: com_msg_id, + decommitment_msg: decom_msg_id, + }); + continue; + } + + output + .iter_mut() + .zip(decommit.randomness) + .for_each(|(x, r)| *x ^= r); + } + + if !guilty_parties.is_empty() { + tracing::error!(guilty_parties = ?guilty_parties, "Some parties cheated"); + Err(Error::PartiesOpenedRandomnessDoesntMatchCommitment { guilty_parties }) + } else { + tracing::debug!(output = %hex::encode(&output), "Generated randomness"); + tracing::info!("Randomness generation protocol completed successfully."); + Ok(output) + } +} + +/// Protocol error +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Couldn't send a message in the first round + #[error("send a message at round 1")] + Round1Send(#[source] SendErr), + /// Couldn't receive a message in the first round + #[error("receive messages at round 1")] + Round1Receive(#[source] CompleteRoundError), + /// Couldn't send a message in the second round + #[error("send a message at round 2")] + Round2Send(#[source] SendErr), + /// Couldn't receive a message in the second round + #[error("receive messages at round 2")] + Round2Receive(#[source] CompleteRoundError), + + /// Some of the parties cheated + #[error("malicious parties: {guilty_parties:?}")] + PartiesOpenedRandomnessDoesntMatchCommitment { + /// List of cheated parties + guilty_parties: Vec, + }, +} + +/// Blames a party in cheating during the protocol 
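+/// (i.e., a party whose opened randomness did not match its earlier commitment)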
+#[derive(Debug)] +pub struct Blame { + /// Index of the cheated party + pub guilty_party: PartyIndex, + /// ID of the message that party sent in the first round + pub commitment_msg: MsgId, + /// ID of the message that party sent in the second round + pub decommitment_msg: MsgId, +} + +#[cfg(test)] +mod tests { + use std::collections::{HashMap, HashSet}; + use std::time::Duration; + + use super::common::*; + use gadget_networking::{Curve, KeyType}; + use gadget_networking_round_based_extension::RoundBasedNetworkAdapter; + use rand::Rng; + use round_based::MpcParty; + use sha2::{Digest, Sha256}; + use tracing::{debug, info}; + + use super::protocol_of_random_generation; + + #[test] + fn simulation() { + let mut rng = rand_dev::DevRng::new(); + + let n: u16 = 5; + + let randomness = round_based::sim::run_with_setup( + core::iter::repeat_with(|| rng.fork()).take(n.into()), + |i, party, rng| protocol_of_random_generation(party, i, n, rng), + ) + .unwrap() + .expect_ok() + .expect_eq(); + + std::println!("Output randomness: {}", hex::encode(randomness)); + } + + #[tokio::test] + async fn simulation_async() { + let mut rng = rand_dev::DevRng::new(); + + let n: u16 = 5; + + let randomness = round_based::sim::async_env::run_with_setup( + core::iter::repeat_with(|| rng.fork()).take(n.into()), + |i, party, rng| protocol_of_random_generation(party, i, n, rng), + ) + .await + .expect_ok() + .expect_eq(); + + std::println!("Output randomness: {}", hex::encode(randomness)); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn p2p_networking() { + init_tracing(); + let network_name = "rand-test-network"; + let instance_id = "rand-test-instance"; + + // Generate node2's key pair first + let instance_key_pair2 = Curve::generate_with_seed(None).unwrap(); + let mut allowed_keys1 = HashSet::new(); + allowed_keys1.insert(instance_key_pair2.public()); + + // Create node1 with node2's key whitelisted + let mut node1 = TestNode::new(network_name, instance_id, allowed_keys1, vec![]).await; + + // Create node2 with node1's key whitelisted and pre-generated key + let mut allowed_keys2 = HashSet::new(); + allowed_keys2.insert(node1.instance_key_pair.public()); + let mut node2 = TestNode::new_with_keys( + network_name, + instance_id, + allowed_keys2, + vec![], + Some(instance_key_pair2), + None, + ) + .await; + + info!("Starting nodes"); + // Start both nodes - this should trigger automatic handshake + let handle1 = node1.start().await.expect("Failed to start node1"); + let handle2 = node2.start().await.expect("Failed to start node2"); + + wait_for_peer_discovery(&[&handle1, &handle2], Duration::from_secs(5)) + .await + .unwrap(); + + let parties = HashMap::from_iter([ + (0, node1.instance_key_pair.public()), + (1, node2.instance_key_pair.public()), + ]); + + let node1_network = RoundBasedNetworkAdapter::new(handle1, 0, parties.clone(), instance_id); + let node2_network = RoundBasedNetworkAdapter::new(handle2, 1, parties, instance_id); + + let mut tasks = vec![]; + tasks.push(tokio::spawn(async move { + let mut rng = rand_dev::DevRng::new(); + let mpc_party = MpcParty::connected(node1_network); + let randomness = protocol_of_random_generation(mpc_party, 0, 2, &mut rng) + .await + .expect("Failed to generate randomness"); + debug!("Node1 generated randomness: {:?}", randomness); + randomness + })); + + tasks.push(tokio::spawn(async move { + let mut rng = rand_dev::DevRng::new(); + let mpc_party = MpcParty::connected(node2_network); + let randomness = protocol_of_random_generation(mpc_party, 
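+            // node2 joins as party index 1 of n = 2 (node1 above runs as index 0)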
1, 2, &mut rng) + .await + .expect("Failed to generate randomness"); + debug!("Node2 generated randomness: {:?}", randomness); + randomness + })); + + let results = futures::future::join_all(tasks).await; + + for result in results { + match result { + Ok(randomness) => { + debug!("Randomness result: {:?}", randomness); + } + Err(e) => { + panic!("Error in randomness generation: {:?}", e); + } + } + } + } +} diff --git a/crates/networking/src/behaviours.rs b/crates/networking/src/behaviours.rs new file mode 100644 index 000000000..40b3b737e --- /dev/null +++ b/crates/networking/src/behaviours.rs @@ -0,0 +1,125 @@ +use crate::error::Result as NetworkingResult; +use crate::key_types::InstanceMsgKeyPair; +use crate::types::ProtocolMessage; +use crate::{ + blueprint_protocol::{BlueprintProtocolBehaviour, BlueprintProtocolEvent}, + discovery::{ + behaviour::{DiscoveryBehaviour, DiscoveryEvent}, + config::DiscoveryConfig, + PeerInfo, PeerManager, + }, +}; +use crossbeam_channel::Sender; +use libp2p::{ + connection_limits::{self, ConnectionLimits}, + identity::Keypair, + kad::QueryId, + ping, + swarm::NetworkBehaviour, + Multiaddr, PeerId, +}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Duration, +}; +use tracing::{debug, info}; + +/// Events that can be emitted by the `GadgetBehavior` +#[derive(Debug)] +pub enum GadgetEvent { + /// Discovery-related events + Discovery(DiscoveryEvent), + /// Ping events for connection liveness + Ping(ping::Event), + /// Blueprint protocol events + Blueprint(BlueprintProtocolEvent), +} + +#[derive(NetworkBehaviour)] +pub struct GadgetBehaviour { + /// Connection limits to prevent `DoS` + connection_limits: connection_limits::Behaviour, + /// Discovery mechanisms (Kademlia, mDNS, etc) + pub(super) discovery: DiscoveryBehaviour, + /// Direct P2P messaging and gossip + pub(super) blueprint_protocol: BlueprintProtocolBehaviour, + /// Connection liveness checks + ping: ping::Behaviour, +} + +impl GadgetBehaviour { + #[must_use] + pub fn new( + network_name: &str, + blueprint_protocol_name: &str, + local_key: &Keypair, + instance_key_pair: &InstanceMsgKeyPair, + target_peer_count: u32, + peer_manager: Arc, + protocol_message_sender: Sender, + ) -> NetworkingResult { + let connection_limits = connection_limits::Behaviour::new( + ConnectionLimits::default() + .with_max_pending_incoming(Some(target_peer_count)) + .with_max_pending_outgoing(Some(target_peer_count)) + .with_max_established_incoming(Some(target_peer_count)) + .with_max_established_outgoing(Some(target_peer_count)) + .with_max_established_per_peer(Some(target_peer_count)), + ); + + let ping = ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(30))); + + info!( + "Setting up discovery behavior with network name: {}", + network_name + ); + let discovery = DiscoveryConfig::new(local_key.public(), network_name) + .mdns(true) + .kademlia(true) + .target_peer_count(target_peer_count) + .build()?; + + info!( + "Setting up blueprint protocol with name: {}", + blueprint_protocol_name + ); + let blueprint_protocol = BlueprintProtocolBehaviour::new( + local_key, + instance_key_pair, + peer_manager, + blueprint_protocol_name, + protocol_message_sender, + ); + + debug!("Created GadgetBehaviour with all components initialized"); + Ok(Self { + connection_limits, + discovery, + blueprint_protocol, + ping, + }) + } + + /// Bootstrap Kademlia network + pub fn bootstrap(&mut self) -> NetworkingResult { + self.discovery.bootstrap() + } + + /// Returns a set of peer ids + #[must_use] 
+ pub fn peers(&self) -> &HashSet { + self.discovery.get_peers() + } + + /// Returns a map of peer ids and their multi-addresses + #[must_use] + pub fn peer_addresses(&self) -> HashMap> { + self.discovery.get_peer_addresses() + } + + #[must_use] + pub fn peer_info(&self, peer_id: &PeerId) -> Option<&PeerInfo> { + self.discovery.get_peer_info(peer_id) + } +} diff --git a/crates/networking/src/blueprint_protocol/behaviour.rs b/crates/networking/src/blueprint_protocol/behaviour.rs new file mode 100644 index 000000000..52f3104ed --- /dev/null +++ b/crates/networking/src/blueprint_protocol/behaviour.rs @@ -0,0 +1,499 @@ +use super::{InstanceMessageRequest, InstanceMessageResponse}; +use crate::blueprint_protocol::HandshakeMessage; +use crate::discovery::PeerManager; +use crate::{ + types::ProtocolMessage, Curve, InstanceMsgKeyPair, InstanceMsgPublicKey, + InstanceSignedMsgSignature, +}; +use bincode; +use crossbeam_channel::Sender; +use dashmap::DashMap; +use gadget_crypto::KeyType; +use libp2p::{ + core::transport::PortUse, + gossipsub::{self, IdentTopic, MessageId, Sha256Topic}, + identity::Keypair, + request_response::{self, OutboundRequestId, ResponseChannel}, + swarm::{ + ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, + }, + Multiaddr, PeerId, StreamProtocol, +}; +use std::{ + sync::Arc, + task::Poll, + time::{Duration, Instant}, +}; +use tracing::{debug, error, info, trace, warn}; + +#[derive(NetworkBehaviour)] +pub struct DerivedBlueprintProtocolBehaviour { + /// Request/response protocol for p2p messaging + request_response: + request_response::cbor::Behaviour, + /// Gossipsub for broadcast messaging + gossipsub: gossipsub::Behaviour, +} + +/// Events emitted by the `BlueprintProtocolBehaviour` +#[derive(Debug)] +pub enum BlueprintProtocolEvent { + /// Request received from a peer + Request { + peer: PeerId, + request: InstanceMessageRequest, + channel: ResponseChannel, + }, + /// Response received from a peer + Response { + peer: PeerId, + request_id: OutboundRequestId, + response: InstanceMessageResponse, + }, + /// Gossip message received + GossipMessage { + source: PeerId, + message: Vec, + topic: IdentTopic, + }, +} + +/// Behaviour that handles the blueprint protocol request/response and gossip +pub struct BlueprintProtocolBehaviour { + /// Request/response protocol for direct messaging + blueprint_protocol: DerivedBlueprintProtocolBehaviour, + /// Name of the blueprint protocol + pub(crate) blueprint_protocol_name: String, + /// Peer manager for tracking peer states + pub(crate) peer_manager: Arc, + /// Libp2p peer ID + pub(crate) local_peer_id: PeerId, + /// Instance key pair for handshakes and blueprint protocol + pub(crate) instance_key_pair: InstanceMsgKeyPair, + /// Peers with pending inbound handshakes + pub(crate) inbound_handshakes: DashMap, + /// Peers with pending outbound handshakes + pub(crate) outbound_handshakes: DashMap, + /// Active response channels + pub(crate) response_channels: + DashMap>, + /// Protocol message sender + pub(crate) protocol_message_sender: Sender, +} + +impl BlueprintProtocolBehaviour { + /// Create a new blueprint protocol behaviour + #[must_use] + pub fn new( + local_key: &Keypair, + instance_key_pair: &InstanceMsgKeyPair, + peer_manager: Arc, + blueprint_protocol_name: &str, + protocol_message_sender: Sender, + ) -> Self { + let blueprint_protocol_name = blueprint_protocol_name.to_string(); + let protocols = vec![( + 
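+            // Derive the request/response stream protocol id from the configured blueprint
+            // protocol name, falling back to the generic "/blueprint_protocol/1.0.0" id when
+            // the name is not a valid libp2p protocol string.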
StreamProtocol::try_from_owned(blueprint_protocol_name.to_string()) + .unwrap_or_else(|_| StreamProtocol::new("/blueprint_protocol/1.0.0")), + request_response::ProtocolSupport::Full, + )]; + + // Initialize gossipsub with message signing + let gossipsub_config = gossipsub::ConfigBuilder::default() + .heartbeat_interval(Duration::from_secs(1)) + .validation_mode(gossipsub::ValidationMode::Strict) + .mesh_n_low(2) + .mesh_n(4) + .mesh_n_high(8) + .gossip_lazy(3) + .history_length(10) + .history_gossip(3) + .flood_publish(true) + .build() + .expect("Valid gossipsub config"); + + let gossipsub = gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(local_key.clone()), + gossipsub_config, + ) + .expect("Valid gossipsub behaviour"); + + let config = request_response::Config::default() + .with_request_timeout(Duration::from_secs(30)) + .with_max_concurrent_streams(50); + + let blueprint_protocol = DerivedBlueprintProtocolBehaviour { + request_response: request_response::cbor::Behaviour::new(protocols, config), + gossipsub, + }; + + let local_peer_id = local_key.public().to_peer_id(); + + Self { + blueprint_protocol, + blueprint_protocol_name, + peer_manager, + local_peer_id, + instance_key_pair: instance_key_pair.clone(), + inbound_handshakes: DashMap::new(), + outbound_handshakes: DashMap::new(), + response_channels: DashMap::new(), + protocol_message_sender, + } + } + + /// Sign a handshake message for a peer + pub(crate) fn sign_handshake( + &self, + key_pair: &mut InstanceMsgKeyPair, + peer: &PeerId, + handshake_msg: &HandshakeMessage, + ) -> Option { + let msg = handshake_msg.to_bytes(peer); + match ::sign_with_secret(key_pair, &msg) { + Ok(signature) => { + let public_key = key_pair.public(); + let hex_msg = hex::encode(msg); + + debug!(%peer, ?hex_msg, %public_key, %signature, "signing handshake"); + Some(signature) + } + Err(e) => { + warn!("Failed to sign handshake message: {e}"); + None + } + } + } + + /// Send a request to a peer + pub fn send_request( + &mut self, + peer: &PeerId, + request: InstanceMessageRequest, + ) -> OutboundRequestId { + debug!(%peer, ?request, "sending request"); + self.blueprint_protocol + .request_response + .send_request(peer, request) + } + + /// Send a response through a response channel + pub fn send_response( + &mut self, + channel: ResponseChannel, + response: InstanceMessageResponse, + ) -> Result<(), InstanceMessageResponse> { + debug!(?response, "sending response"); + self.blueprint_protocol + .request_response + .send_response(channel, response) + } + + /// Subscribe to a gossip topic + pub fn subscribe(&mut self, topic: &str) -> Result { + let topic = Sha256Topic::new(topic); + self.blueprint_protocol.gossipsub.subscribe(&topic) + } + + /// Publish a message to a gossip topic + pub fn publish( + &mut self, + topic: &str, + data: impl Into>, + ) -> Result { + let topic = Sha256Topic::new(topic); + self.blueprint_protocol.gossipsub.publish(topic, data) + } + + /// Verify and handle a handshake with a peer + pub fn verify_handshake( + &self, + msg: &HandshakeMessage, + public_key: &InstanceMsgPublicKey, + signature: &InstanceSignedMsgSignature, + ) -> Result<(), InstanceMessageResponse> { + if msg.is_expired(HandshakeMessage::MAX_AGE) { + error!(%msg.sender, "Handshake message expired"); + return Err(InstanceMessageResponse::Error { + code: 400, + message: "Handshake message expired".to_string(), + }); + } + + let msg_bytes = msg.to_bytes(&self.local_peer_id); + let hex_msg = hex::encode(msg_bytes.clone()); + + debug!(%hex_msg, 
%public_key, %signature, "verifying handshake"); + + let valid = ::verify(public_key, &msg_bytes, signature); + if !valid { + warn!(%msg.sender, "Invalid handshake signature for peer"); + return Err(InstanceMessageResponse::Error { + code: 400, + message: "Invalid handshake signature".to_string(), + }); + } + + trace!(%msg.sender, "Handshake signature verified successfully"); + Ok(()) + } + + pub fn handle_handshake( + &self, + msg: &HandshakeMessage, + public_key: &InstanceMsgPublicKey, + signature: &InstanceSignedMsgSignature, + ) -> Result<(), InstanceMessageResponse> { + self.verify_handshake(msg, public_key, signature)?; + self.peer_manager + .add_peer_id_to_public_key(&msg.sender, public_key); + + Ok(()) + } + /// Handle a failed handshake with a peer + pub fn handle_handshake_failure(&self, peer: &PeerId, reason: &str) { + // Update peer info and potentially ban peer + if let Some(mut peer_info) = self.peer_manager.get_peer_info(peer) { + peer_info.failures += 1; + self.peer_manager.update_peer(*peer, peer_info.clone()); + + // Ban peer if too many failures + if peer_info.failures >= 3 { + self.peer_manager + .ban_peer(*peer, reason, Some(Duration::from_secs(300))); + } + } + } + + pub fn handle_gossipsub_event(&mut self, event: gossipsub::Event) { + match event { + gossipsub::Event::Message { + propagation_source, + message_id: _, + message, + } => { + // Only accept gossip from verified peers + if !self.peer_manager.is_peer_verified(&propagation_source) { + warn!(%propagation_source, "Received gossip from unverified peer"); + return; + } + + debug!(%propagation_source, "Received gossip message"); + + // Deserialize the protocol message + let Ok(protocol_message) = bincode::deserialize::(&message.data) + else { + warn!(%propagation_source, "Failed to deserialize gossip message"); + return; + }; + + debug!(%propagation_source, %protocol_message, "Forwarding gossip message to protocol handler"); + if let Err(e) = self.protocol_message_sender.send(protocol_message) { + warn!(%propagation_source, "Failed to forward gossip message: {e}"); + } + } + gossipsub::Event::Subscribed { peer_id, topic } => { + debug!(%peer_id, %topic, "Peer subscribed to topic"); + } + gossipsub::Event::Unsubscribed { peer_id, topic } => { + debug!(%peer_id, %topic, "Peer unsubscribed from topic"); + } + _ => {} + } + } +} + +impl NetworkBehaviour for BlueprintProtocolBehaviour { + type ConnectionHandler = + ::ConnectionHandler; + + type ToSwarm = BlueprintProtocolEvent; + + fn handle_established_inbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + local_addr: &libp2p::Multiaddr, + remote_addr: &libp2p::Multiaddr, + ) -> Result, ConnectionDenied> { + self.blueprint_protocol + .handle_established_inbound_connection(connection_id, peer, local_addr, remote_addr) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: libp2p::core::Endpoint, + port_use: PortUse, + ) -> Result, ConnectionDenied> { + self.blueprint_protocol + .handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + port_use, + ) + } + + fn handle_pending_inbound_connection( + &mut self, + connection_id: ConnectionId, + local_addr: &libp2p::Multiaddr, + remote_addr: &libp2p::Multiaddr, + ) -> Result<(), ConnectionDenied> { + self.blueprint_protocol.handle_pending_inbound_connection( + connection_id, + local_addr, + remote_addr, + ) + } + + fn handle_pending_outbound_connection( + &mut self, + 
connection_id: ConnectionId, + maybe_peer: Option, + addresses: &[libp2p::Multiaddr], + effective_role: libp2p::core::Endpoint, + ) -> Result, ConnectionDenied> { + self.blueprint_protocol.handle_pending_outbound_connection( + connection_id, + maybe_peer, + addresses, + effective_role, + ) + } + + fn on_connection_handler_event( + &mut self, + peer_id: PeerId, + connection_id: ConnectionId, + event: THandlerOutEvent, + ) { + self.blueprint_protocol + .on_connection_handler_event(peer_id, connection_id, event); + } + + fn on_swarm_event(&mut self, event: FromSwarm<'_>) { + match &event { + FromSwarm::ConnectionEstablished(e) if e.other_established == 0 => { + // Start handshake if this peer is not verified + if !self.peer_manager.is_peer_verified(&e.peer_id) { + debug!( + "Established connection with unverified peer {:?}, sending handshake", + e.peer_id + ); + let mut key_pair = self.instance_key_pair.clone(); + + let handshake_msg = HandshakeMessage::new(self.local_peer_id); + let Some(signature) = + self.sign_handshake(&mut key_pair, &e.peer_id, &handshake_msg) + else { + return; + }; + + self.send_request( + &e.peer_id, + InstanceMessageRequest::Handshake { + public_key: key_pair.public(), + signature, + msg: handshake_msg, + }, + ); + self.outbound_handshakes.insert(e.peer_id, Instant::now()); + info!( + "Established connection to {:?}, sending handshake", + e.peer_id + ); + } + + self.blueprint_protocol + .gossipsub + .add_explicit_peer(&e.peer_id); + } + FromSwarm::ConnectionClosed(e) if e.remaining_established == 0 => { + if self.inbound_handshakes.contains_key(&e.peer_id) { + self.inbound_handshakes.remove(&e.peer_id); + } + + if self.outbound_handshakes.contains_key(&e.peer_id) { + self.outbound_handshakes.remove(&e.peer_id); + } + + if self.peer_manager.is_peer_verified(&e.peer_id) { + self.peer_manager + .remove_peer(&e.peer_id, "connection closed"); + } + + self.blueprint_protocol + .gossipsub + .remove_explicit_peer(&e.peer_id); + + self.peer_manager.remove_peer_id_from_public_key(&e.peer_id); + } + + _ => {} + } + + self.blueprint_protocol.on_swarm_event(event); + } + + fn poll( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> Poll>> { + while let Poll::Ready(ev) = self.blueprint_protocol.poll(cx) { + match ev { + ToSwarm::GenerateEvent(ev) => match ev { + DerivedBlueprintProtocolBehaviourEvent::RequestResponse( + blueprint_protocol_event, + ) => self.handle_request_response_event(blueprint_protocol_event), + DerivedBlueprintProtocolBehaviourEvent::Gossipsub(gossip_event) => { + self.handle_gossipsub_event(gossip_event); + } + }, + ToSwarm::Dial { opts } => { + return Poll::Ready(ToSwarm::Dial { opts }); + } + ToSwarm::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event, + }) + } + ToSwarm::CloseConnection { + peer_id, + connection, + } => { + return Poll::Ready(ToSwarm::CloseConnection { + peer_id, + connection, + }) + } + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => { + return Poll::Ready(ToSwarm::RemoveListener { id }) + } + ToSwarm::NewExternalAddrCandidate(addr) => { + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(addr)) + } + ToSwarm::ExternalAddrConfirmed(addr) => { + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) + } + ToSwarm::ExternalAddrExpired(addr) => { + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) + } + _ => {} + } + } + Poll::Pending + } +} diff --git 
a/crates/networking/src/blueprint_protocol/handler.rs b/crates/networking/src/blueprint_protocol/handler.rs new file mode 100644 index 000000000..5c970b2a6 --- /dev/null +++ b/crates/networking/src/blueprint_protocol/handler.rs @@ -0,0 +1,268 @@ +use std::time::{Duration, Instant}; + +use libp2p::{request_response, PeerId}; +use tracing::{debug, warn}; + +use crate::blueprint_protocol::HandshakeMessage; +use crate::{key_types::InstanceMsgPublicKey, types::ProtocolMessage}; + +use super::{BlueprintProtocolBehaviour, InstanceMessageRequest, InstanceMessageResponse}; + +const INBOUND_HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30); +const OUTBOUND_HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(30); + +impl BlueprintProtocolBehaviour { + #[allow(clippy::too_many_lines)] + pub fn handle_request_response_event( + &mut self, + event: request_response::Event, + ) { + match event { + request_response::Event::Message { + peer, + message: + request_response::Message::Request { + request: + InstanceMessageRequest::Handshake { + public_key, + signature, + msg, + }, + channel, + .. + }, + .. + } => { + debug!(%peer, "Received handshake request"); + + // Check if we already sent a handshake request to this peer + if self.outbound_handshakes.contains_key(&peer) { + // If we have an outbound handshake pending, we should still respond to their request + // This ensures both sides complete their handshakes even if messages cross on the wire + debug!(%peer, "Responding to inbound handshake request while outbound is pending"); + } + + if !self.peer_manager.is_key_whitelisted(&public_key) { + warn!(%peer, %public_key, "Received handshake response from unwhitelisted peer"); + self.peer_manager.handle_nonwhitelisted_peer(&peer); + return; + } + + // Verify the handshake + match self.verify_handshake(&msg, &public_key, &signature) { + Ok(()) => { + // Store the handshake request + self.inbound_handshakes.insert(peer, Instant::now()); + self.peer_manager + .add_peer_id_to_public_key(&peer, &public_key); + + // Send handshake response + let mut key_pair = self.instance_key_pair.clone(); + + let handshake_msg = HandshakeMessage::new(self.local_peer_id); + let Some(signature) = + self.sign_handshake(&mut key_pair, &peer, &handshake_msg) + else { + return; + }; + + let response = InstanceMessageResponse::Handshake { + public_key: key_pair.public().clone(), + signature, + msg: handshake_msg, + }; + + if let Err(e) = self.send_response(channel, response) { + warn!(%peer, "Failed to send handshake response: {:?}", e); + return; + } + } + Err(e) => { + warn!(%peer, "Invalid handshake request: {:?}", e); + let response = InstanceMessageResponse::Error { + code: 400, + message: format!("Invalid handshake: {:?}", e), + }; + if let Err(e) = self.send_response(channel, response) { + warn!(%peer, "Failed to send error response: {:?}", e); + } + } + } + } + request_response::Event::Message { + peer, + message: + request_response::Message::Response { + response: + InstanceMessageResponse::Handshake { + public_key, + signature, + msg, + }, + .. + }, + .. 
+ } => { + debug!(%peer, "Received handshake response"); + + // Verify we have a pending outbound handshake + if !self.outbound_handshakes.contains_key(&peer) { + warn!(%peer, "Received unexpected handshake response"); + return; + } + + if !self.peer_manager.is_key_whitelisted(&public_key) { + warn!(%peer, "Received handshake response from unwhitelisted peer"); + self.peer_manager.handle_nonwhitelisted_peer(&peer); + return; + } + + // Verify the handshake + match self.verify_handshake(&msg, &public_key, &signature) { + Ok(()) => { + // Mark handshake as completed + self.complete_handshake(&peer, &public_key); + } + Err(e) => { + warn!(%peer, "Invalid handshake verification: {:?}", e); + self.outbound_handshakes.remove(&peer); + self.handle_handshake_failure(&peer, "Invalid handshake verification"); + } + } + } + request_response::Event::Message { + peer, + message: + request_response::Message::Request { + request: + InstanceMessageRequest::Protocol { + protocol, + payload, + metadata: _, + }, + channel, + .. + }, + .. + } => { + // Reject messages from self + if peer == self.local_peer_id { + return; + } + + // Only accept protocol messages from peers we've completed handshakes with + if !self.peer_manager.is_peer_verified(&peer) { + warn!(%peer, "Received protocol message from unverified peer"); + let response = InstanceMessageResponse::Error { + code: 403, + message: "Handshake required".to_string(), + }; + if let Err(e) = self.send_response(channel, response) { + warn!(%peer, "Failed to send error response: {:?}", e); + } + return; + } + + let protocol_message: ProtocolMessage = match bincode::deserialize(&payload) { + Ok(message) => message, + Err(e) => { + warn!(%peer, "Failed to deserialize protocol message: {:?}", e); + let response = InstanceMessageResponse::Error { + code: 400, + message: format!("Invalid protocol message: {:?}", e), + }; + if let Err(e) = self.send_response(channel, response) { + warn!(%peer, "Failed to send error response: {:?}", e); + } + return; + } + }; + + debug!(%peer, %protocol, %protocol_message, "Received protocol request"); + if let Err(e) = self.protocol_message_sender.send(protocol_message) { + warn!(%peer, "Failed to send protocol message: {:?}", e); + } + } + request_response::Event::Message { + peer, + message: + request_response::Message::Response { + response: InstanceMessageResponse::Error { code, message }, + .. + }, + .. + } => { + if !self.peer_manager.is_peer_verified(&peer) { + warn!(%peer, code, %message, "Received error response from unverified peer"); + return; + } + } + request_response::Event::Message { + peer, + message: + request_response::Message::Response { + response: InstanceMessageResponse::Success { protocol, data: _ }, + .. + }, + .. 
+ } => { + debug!(%peer, %protocol, "Received successful protocol response"); + } + _ => {} + } + + // Check for expired handshakes + self.check_expired_handshakes(); + } + + /// Check for and remove expired handshakes + fn check_expired_handshakes(&mut self) { + let now = Instant::now(); + + // Check inbound handshakes + let expired_inbound: Vec<_> = self + .inbound_handshakes + .clone() + .into_read_only() + .iter() + .filter(|(_, &time)| now.duration_since(time) > INBOUND_HANDSHAKE_TIMEOUT) + .map(|(peer_id, _)| *peer_id) + .collect(); + + for peer_id in expired_inbound { + self.inbound_handshakes.remove(&peer_id); + self.handle_handshake_failure(&peer_id, "Inbound handshake timeout"); + } + + // Check outbound handshakes + let expired_outbound: Vec<_> = self + .outbound_handshakes + .clone() + .into_read_only() + .iter() + .filter(|(_, &time)| now.duration_since(time) > OUTBOUND_HANDSHAKE_TIMEOUT) + .map(|(peer_id, _)| *peer_id) + .collect(); + + for peer_id in expired_outbound { + self.outbound_handshakes.remove(&peer_id); + self.handle_handshake_failure(&peer_id, "Outbound handshake timeout"); + } + } + + /// Complete a successful handshake + fn complete_handshake(&mut self, peer: &PeerId, public_key: &InstanceMsgPublicKey) { + debug!(%peer, "Completed handshake"); + + // Remove from pending handshakes + self.inbound_handshakes.remove(peer); + self.outbound_handshakes.remove(peer); + + // Update peer manager + self.peer_manager + .add_peer_id_to_public_key(peer, public_key); + + // Add to verified peers + self.peer_manager.verify_peer(peer); + } +} diff --git a/crates/networking/src/blueprint_protocol/mod.rs b/crates/networking/src/blueprint_protocol/mod.rs new file mode 100644 index 000000000..dc1776f96 --- /dev/null +++ b/crates/networking/src/blueprint_protocol/mod.rs @@ -0,0 +1,114 @@ +mod behaviour; +mod handler; + +pub use behaviour::{BlueprintProtocolBehaviour, BlueprintProtocolEvent}; +use libp2p::PeerId; + +use crate::key_types::{InstanceMsgPublicKey, InstanceSignedMsgSignature}; +use serde::{Deserialize, Serialize}; + +/// A message sent to a specific instance or broadcast to all instances +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum InstanceMessageRequest { + /// Handshake request with authentication + Handshake { + /// Public key for authentication + public_key: InstanceMsgPublicKey, + /// Signature for verification + signature: InstanceSignedMsgSignature, + /// Handshake message + msg: HandshakeMessage, + }, + /// Protocol-specific message with custom payload + Protocol { + /// Protocol identifier (e.g., "consensus/1.0.0", "sync/1.0.0") + protocol: String, + /// Protocol-specific message payload + payload: Vec, + /// Optional metadata for the protocol handler + metadata: Option>, + }, +} + +/// Response to an instance message +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum InstanceMessageResponse { + /// Handshake response with authentication + Handshake { + /// Public key for authentication + public_key: InstanceMsgPublicKey, + /// Signature for verification + signature: InstanceSignedMsgSignature, + /// Handshake message + msg: HandshakeMessage, + }, + /// Success response with optional data + Success { + /// Protocol identifier (e.g., "consensus/1.0.0", "sync/1.0.0") + protocol: String, + /// Response data specific to the protocol + data: Option>, + }, + /// Error response with details + Error { + /// Error code + code: u16, + /// Error message + message: String, + }, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct 
HandshakeMessage { + /// Sender [`PeerId`] + pub sender: PeerId, + /// A Unix timestamp in milliseconds + pub timestamp: u128, +} + +impl HandshakeMessage { + /// Maximum age for a handshake message in milliseconds + pub const MAX_AGE: u128 = 30_000; + + /// Creates a new handshake message + /// + /// # Panics + /// - If the system time is before the Unix epoch + #[must_use] + pub fn new(sender: PeerId) -> Self { + let timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("time went backwards") + .as_millis(); + Self { sender, timestamp } + } + + /// Checks if the handshake message is expired + /// + /// # Arguments + /// - `max_age`: Maximum age in milliseconds + /// + /// # Returns + /// - `true` if the message is expired, `false` otherwise + /// + /// # Panics + /// - If the system time is before the Unix epoch + #[must_use] + pub fn is_expired(&self, max_age: u128) -> bool { + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("time went backwards") + .as_millis(); + now.saturating_sub(self.timestamp) > max_age + } + + /// Converts the handshake message to a byte array + #[must_use] + pub fn to_bytes(&self, other_peer_id: &PeerId) -> Vec { + let mut bytes = Vec::new(); + bytes.extend(&self.sender.to_bytes()); + bytes.extend(other_peer_id.to_bytes()); + bytes.extend(&self.timestamp.to_be_bytes()); + bytes + } +} diff --git a/crates/networking/src/discovery/behaviour.rs b/crates/networking/src/discovery/behaviour.rs new file mode 100644 index 000000000..10f872618 --- /dev/null +++ b/crates/networking/src/discovery/behaviour.rs @@ -0,0 +1,394 @@ +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, + task::{Context, Poll}, + time::Duration, +}; + +use crate::error::{Error, Result as NetworkingResult}; +use libp2p::{ + autonat, + core::Multiaddr, + identify, + identity::PeerId, + kad::{self, store::MemoryStore}, + mdns::{self, Event as MdnsEvent}, + relay, + swarm::{ + behaviour::toggle::Toggle, derive_prelude::*, dial_opts::DialOpts, NetworkBehaviour, + ToSwarm, + }, + upnp, +}; +use tokio::time::Interval; +use tracing::trace; +use tracing::{debug, info}; + +use super::PeerInfo; + +#[derive(NetworkBehaviour)] +pub struct DerivedDiscoveryBehaviour { + /// Kademlia discovery + pub kademlia: Toggle>, + /// Local network discovery via mDNS + pub mdns: Toggle, + /// Identify protocol for peer information exchange + pub identify: identify::Behaviour, + /// NAT traversal + pub autonat: autonat::Behaviour, + /// `UPnP` port mapping + pub upnp: Toggle, + /// Circuit relay for NAT traversal + pub relay: Toggle, +} + +/// Event generated by the `DiscoveryBehaviour`. +#[derive(Debug)] +pub enum DiscoveryEvent { + /// Event that notifies that we connected to the node with the given peer + /// id. + PeerConnected(PeerId), + + /// Event that notifies that we disconnected with the node with the given + /// peer id. + PeerDisconnected(PeerId), + + /// Discovery event + Discovery(Box), +} + +pub struct DiscoveryBehaviour { + /// Discovery behaviour + pub discovery: DerivedDiscoveryBehaviour, + /// Stream that fires when we need to perform the next random Kademlia + /// query. + pub next_kad_random_query: Interval, + /// After `next_kad_random_query` triggers, the next one triggers after this + /// duration. + pub duration_to_next_kad: Duration, + /// Events to return in priority when polled. + pub pending_events: VecDeque, + /// Number of nodes we're currently connected to. 
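+    /// Compared against `target_peer_count` when deciding whether to keep issuing random
+    /// Kademlia queries and whether to add mDNS-discovered peers.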
+ pub n_node_connected: u32, + /// Peers + pub peers: HashSet, + /// Peer info + pub peer_info: HashMap, + /// Target peer count + pub target_peer_count: u32, + /// Options to configure dials to known peers. + pub pending_dial_opts: VecDeque, +} + +impl DiscoveryBehaviour { + /// Bootstrap Kademlia network + pub fn bootstrap(&mut self) -> NetworkingResult { + if let Some(active_kad) = self.discovery.kademlia.as_mut() { + active_kad.bootstrap().map_err(Into::into) + } else { + Err(Error::KademliaNotActivated) + } + } + + #[must_use] + pub fn get_peers(&self) -> &HashSet { + &self.peers + } + + #[must_use] + pub fn get_peer_info(&self, peer_id: &PeerId) -> Option<&PeerInfo> { + self.peer_info.get(peer_id) + } + + #[must_use] + pub fn nat_status(&self) -> autonat::NatStatus { + self.discovery.autonat.nat_status() + } + + #[must_use] + pub fn get_peer_addresses(&self) -> HashMap> { + self.peer_info + .iter() + .map(|(peer_id, info)| (*peer_id, info.addresses.clone())) + .collect() + } +} + +impl NetworkBehaviour for DiscoveryBehaviour { + type ConnectionHandler = ::ConnectionHandler; + type ToSwarm = DiscoveryEvent; + + fn handle_established_inbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result, ConnectionDenied> { + debug!(%peer, "Handling inbound connection"); + self.discovery.handle_established_inbound_connection( + connection_id, + peer, + local_addr, + remote_addr, + ) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + addr: &libp2p::Multiaddr, + role_override: libp2p::core::Endpoint, + port_use: PortUse, + ) -> Result, ConnectionDenied> { + debug!(%peer, "Handling outbound connection"); + self.peer_info + .entry(peer) + .or_insert_with(|| { + debug!(%peer, "Creating new peer info for outbound connection"); + PeerInfo { + addresses: HashSet::new(), + identify_info: None, + last_seen: std::time::SystemTime::now(), + ping_latency: None, + successes: 0, + failures: 0, + average_response_time: None, + } + }) + .addresses + .insert(addr.clone()); + self.discovery.handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + port_use, + ) + } + + fn on_swarm_event(&mut self, event: FromSwarm<'_>) { + match &event { + FromSwarm::ConnectionEstablished(e) => { + if e.other_established == 0 { + debug!(%e.peer_id, "First connection established with peer"); + self.n_node_connected += 1; + self.peers.insert(e.peer_id); + self.pending_events + .push_back(DiscoveryEvent::PeerConnected(e.peer_id)); + } + } + FromSwarm::ConnectionClosed(e) => { + if e.remaining_established == 0 { + debug!(%e.peer_id, "Last connection closed with peer"); + self.n_node_connected -= 1; + self.peers.remove(&e.peer_id); + self.peer_info.remove(&e.peer_id); + self.pending_events + .push_back(DiscoveryEvent::PeerDisconnected(e.peer_id)); + } + } + _ => {} + } + self.discovery.on_swarm_event(event); + } + + fn on_connection_handler_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: THandlerOutEvent, + ) { + self.discovery + .on_connection_handler_event(peer_id, connection, event); + } + + #[allow(clippy::type_complexity)] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + // Immediately process the content of `discovered`. 
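+        // Poll priority: drain queued discovery events first, then pending dials, then the
+        // periodic random Kademlia walk, and finally the inner derived behaviours.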
+ if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ToSwarm::GenerateEvent(ev)); + } + + // Dial to peers + if let Some(opts) = self.pending_dial_opts.pop_front() { + return Poll::Ready(ToSwarm::Dial { opts }); + } + + // Poll the stream that fires when we need to start a random Kademlia query. + while self.next_kad_random_query.poll_tick(cx).is_ready() { + if self.n_node_connected < self.target_peer_count { + // We still have not hit the discovery max, send random request for peers. + let random_peer_id = PeerId::random(); + debug!( + "Libp2p <= Starting random Kademlia request for {:?}", + random_peer_id + ); + if let Some(kademlia) = self.discovery.kademlia.as_mut() { + kademlia.get_closest_peers(random_peer_id); + } + } + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. + self.next_kad_random_query = tokio::time::interval(self.duration_to_next_kad); + // we need to reset the interval, otherwise the next tick completes immediately. + self.next_kad_random_query.reset(); + + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + } + + // Poll discovery events. + while let Poll::Ready(ev) = self.discovery.poll(cx) { + match ev { + ToSwarm::GenerateEvent(ev) => { + match &ev { + DerivedDiscoveryBehaviourEvent::Identify(identify::Event::Received { + peer_id, + info, + connection_id, + }) => { + debug!(%peer_id, "Received identify event in discovery behaviour"); + self.peer_info.entry(*peer_id).or_default().identify_info = + Some(info.clone()); + if let Some(kademlia) = self.discovery.kademlia.as_mut() { + for address in &info.listen_addrs { + kademlia.add_address(peer_id, address.clone()); + } + } + self.pending_events + .push_back(DiscoveryEvent::Discovery(Box::new( + DerivedDiscoveryBehaviourEvent::Identify( + identify::Event::Received { + peer_id: *peer_id, + info: info.clone(), + connection_id: *connection_id, + }, + ), + ))); + } + DerivedDiscoveryBehaviourEvent::Identify(identify::Event::Sent { + .. + }) => { + debug!("Identify event sent"); + } + DerivedDiscoveryBehaviourEvent::Identify(identify::Event::Pushed { + .. + }) => { + debug!("Identify event pushed"); + } + DerivedDiscoveryBehaviourEvent::Identify(identify::Event::Error { + .. + }) => { + debug!("Identify event error"); + } + + DerivedDiscoveryBehaviourEvent::Autonat(_) => {} + DerivedDiscoveryBehaviourEvent::Upnp(ev) => match ev { + upnp::Event::NewExternalAddr(addr) => { + info!("UPnP NewExternalAddr: {addr}"); + } + upnp::Event::ExpiredExternalAddr(addr) => { + info!("UPnP ExpiredExternalAddr: {addr}"); + } + upnp::Event::GatewayNotFound => { + info!("UPnP GatewayNotFound"); + } + upnp::Event::NonRoutableGateway => { + info!("UPnP NonRoutableGateway"); + } + }, + DerivedDiscoveryBehaviourEvent::Kademlia(ev) => match ev { + // Adding to Kademlia buckets is automatic with our config, + // no need to do manually. + kad::Event::RoutingUpdated { .. } + | kad::Event::RoutablePeer { .. } + | kad::Event::PendingRoutablePeer { .. } => { + // Intentionally ignore + } + other => { + trace!("Libp2p => Unhandled Kademlia event: {:?}", other); + } + }, + DerivedDiscoveryBehaviourEvent::Mdns(ev) => match ev { + MdnsEvent::Discovered(list) => { + if self.n_node_connected >= self.target_peer_count { + // Already over discovery max, don't add discovered peers. + // We could potentially buffer these addresses to be added later, + // but mdns is not an important use case and may be removed in future. 
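+                                // `continue` skips the rest of this event's handling, so the
+                                // discovered peers are neither added to Kademlia nor forwarded
+                                // upstream as a discovery event.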
+ continue; + } + + // Add any discovered peers to Kademlia + for (peer_id, multiaddr) in list { + if let Some(kad) = self.discovery.kademlia.as_mut() { + kad.add_address(peer_id, multiaddr.clone()); + } + } + } + MdnsEvent::Expired(_) => {} + }, + DerivedDiscoveryBehaviourEvent::Relay(relay_event) => match relay_event { + relay::Event::ReservationReqAccepted { src_peer_id, .. } => { + debug!("Relay accepted reservation request from: {src_peer_id:#?}"); + } + relay::Event::ReservationReqDenied { src_peer_id } => { + debug!("Reservation request was denied for: {src_peer_id:#?}"); + } + relay::Event::ReservationTimedOut { src_peer_id } => { + debug!("Reservation timed out for: {src_peer_id:#?}"); + } + _ => {} + }, + } + self.pending_events + .push_back(DiscoveryEvent::Discovery(Box::new(ev))); + } + ToSwarm::Dial { opts } => { + return Poll::Ready(ToSwarm::Dial { opts }); + } + ToSwarm::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event, + }) + } + ToSwarm::CloseConnection { + peer_id, + connection, + } => { + return Poll::Ready(ToSwarm::CloseConnection { + peer_id, + connection, + }) + } + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => { + return Poll::Ready(ToSwarm::RemoveListener { id }) + } + ToSwarm::NewExternalAddrCandidate(addr) => { + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(addr)) + } + ToSwarm::ExternalAddrConfirmed(addr) => { + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) + } + ToSwarm::ExternalAddrExpired(addr) => { + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) + } + _ => {} + } + } + + Poll::Pending + } +} diff --git a/crates/networking/src/discovery/config.rs b/crates/networking/src/discovery/config.rs new file mode 100644 index 000000000..a90522a14 --- /dev/null +++ b/crates/networking/src/discovery/config.rs @@ -0,0 +1,188 @@ +use super::{ + behaviour::{DerivedDiscoveryBehaviour, DiscoveryBehaviour}, + new_kademlia, +}; +use crate::error::Result; +use libp2p::{ + autonat, identify, identity::PublicKey, mdns, relay, upnp, Multiaddr, PeerId, StreamProtocol, +}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + time::Duration, +}; +use tracing::warn; + +pub struct DiscoveryConfig { + /// The local peer ID. + local_peer_id: PeerId, + /// The local public key. + local_public_key: PublicKey, + /// The bootstrap peers. + bootstrap_peers: Vec<(PeerId, Multiaddr)>, + /// The relay nodes. + relay_nodes: Vec<(PeerId, Multiaddr)>, + /// The number of peers to connect to. + target_peer_count: u32, + /// Enable mDNS discovery. + enable_mdns: bool, + /// Enable Kademlia discovery. + enable_kademlia: bool, + /// Enable `UPnP` discovery. + enable_upnp: bool, + /// Enable relay nodes. + enable_relay: bool, + /// The name of the network. + network_name: String, + /// Protocol version string that uniquely identifies your P2P service. + /// This should be unique to your application to avoid conflicts with other P2P networks. 
+ /// Format recommendation: "/" + /// Example: "my-blockchain/1.0.0" or "my-chat-app/0.1.0" + protocol_version: String, +} + +impl DiscoveryConfig { + #[must_use] + pub fn new(local_public_key: PublicKey, network_name: impl Into) -> Self { + Self { + local_peer_id: local_public_key.to_peer_id(), + local_public_key, + bootstrap_peers: Vec::new(), + relay_nodes: Vec::new(), + target_peer_count: 25, // Reasonable default + enable_mdns: true, // Enable by default for local development + enable_kademlia: true, // Enable by default for production + enable_upnp: true, // Enable by default for better connectivity + enable_relay: true, // Enable by default for relay functionality + network_name: network_name.into(), + protocol_version: String::from("gadget/1.0.0"), // Default version + } + } + + /// Set the protocol version that uniquely identifies your P2P service. + /// This should be unique to your application to avoid conflicts with other P2P networks. + /// Format recommendation: "/" + #[must_use] + pub fn protocol_version(mut self, version: impl Into) -> Self { + self.protocol_version = version.into(); + self + } + + #[must_use] + pub fn bootstrap_peers(mut self, peers: Vec<(PeerId, Multiaddr)>) -> Self { + self.bootstrap_peers = peers; + self + } + + #[must_use] + pub fn relay_nodes(mut self, nodes: Vec<(PeerId, Multiaddr)>) -> Self { + self.relay_nodes = nodes; + self + } + + #[must_use] + pub fn target_peer_count(mut self, count: u32) -> Self { + self.target_peer_count = count; + self + } + + #[must_use] + pub fn mdns(mut self, enable: bool) -> Self { + self.enable_mdns = enable; + self + } + + #[must_use] + pub fn kademlia(mut self, enable: bool) -> Self { + self.enable_kademlia = enable; + self + } + + #[must_use] + pub fn upnp(mut self, enable: bool) -> Self { + self.enable_upnp = enable; + self + } + + #[must_use] + pub fn relay(mut self, enable: bool) -> Self { + self.enable_relay = enable; + self + } + + /// Construct this [`DiscoveryConfig`] into a [`DiscoveryBehaviour`] + /// + /// # Errors + /// + /// If `mdns` is enabled, see [mdns::Behaviour::new] + pub fn build(self) -> Result { + let kademlia_opt = if self.enable_kademlia { + let protocol = StreamProtocol::try_from_owned(format!( + "/gadget/kad/{}/kad/1.0.0", + self.network_name + ))?; + + let mut kademlia = new_kademlia(self.local_peer_id, protocol); + + // Add bootstrap peers + for (peer_id, addr) in &self.bootstrap_peers { + kademlia.add_address(peer_id, addr.clone()); + } + + // Start bootstrap process + if let Err(e) = kademlia.bootstrap() { + warn!("Kademlia bootstrap failed: {}", e); + } + + Some(kademlia) + } else { + None + }; + + let mdns_opt = if self.enable_mdns { + Some(mdns::Behaviour::new( + mdns::Config::default(), + self.local_peer_id, + )?) 
+        } else {
+            None
+        };
+
+        let upnp_opt = if self.enable_upnp {
+            Some(upnp::tokio::Behaviour::default())
+        } else {
+            None
+        };
+
+        let relay_opt = if self.enable_relay {
+            let relay = relay::Behaviour::new(self.local_peer_id, relay::Config::default());
+            Some(relay)
+        } else {
+            None
+        };
+
+        let behaviour = DerivedDiscoveryBehaviour {
+            kademlia: kademlia_opt.into(),
+            mdns: mdns_opt.into(),
+            identify: identify::Behaviour::new(
+                identify::Config::new(self.protocol_version, self.local_public_key)
+                    .with_agent_version(format!("gadget-{}", env!("CARGO_PKG_VERSION")))
+                    .with_push_listen_addr_updates(true),
+            ),
+            autonat: autonat::Behaviour::new(self.local_peer_id, autonat::Config::default()),
+            upnp: upnp_opt.into(),
+            relay: relay_opt.into(),
+        };
+
+        Ok(DiscoveryBehaviour {
+            discovery: behaviour,
+            peers: HashSet::new(),
+            peer_info: HashMap::new(),
+            target_peer_count: self.target_peer_count,
+            next_kad_random_query: tokio::time::interval(Duration::from_secs(1)),
+            duration_to_next_kad: Duration::from_secs(1),
+            pending_events: VecDeque::new(),
+            n_node_connected: 0,
+            pending_dial_opts: VecDeque::new(),
+        })
+    }
+}
diff --git a/crates/networking/src/discovery/mod.rs b/crates/networking/src/discovery/mod.rs
new file mode 100644
index 000000000..2b63a01e7
--- /dev/null
+++ b/crates/networking/src/discovery/mod.rs
@@ -0,0 +1,32 @@
+use std::{num::NonZero, time::Duration};
+
+use libp2p::{
+    kad::{self, store::MemoryStore},
+    PeerId, StreamProtocol,
+};
+
+pub mod behaviour;
+pub mod config;
+pub mod peers;
+
+pub use peers::{PeerEvent, PeerInfo, PeerManager};
+
+#[must_use]
+#[allow(clippy::missing_panics_doc)]
+pub fn new_kademlia(peer_id: PeerId, protocol: StreamProtocol) -> kad::Behaviour<MemoryStore> {
+    let store = kad::store::MemoryStore::new(peer_id);
+    let mut config = kad::Config::new(protocol);
+
+    // Optimize Kademlia configuration
+    config
+        .set_query_timeout(Duration::from_secs(60))
+        .set_replication_factor(NonZero::new(3).unwrap())
+        .set_publication_interval(Some(Duration::from_secs(120)))
+        .set_provider_record_ttl(Some(Duration::from_secs(24 * 60 * 60)))
+        .set_record_ttl(Some(Duration::from_secs(24 * 60 * 60)))
+        .set_parallelism(NonZero::new(5).unwrap());
+
+    let mut kademlia = kad::Behaviour::with_config(peer_id, store, config);
+    kademlia.set_mode(Some(kad::Mode::Server));
+    kademlia
+}
diff --git a/crates/networking/src/discovery/peers.rs b/crates/networking/src/discovery/peers.rs
new file mode 100644
index 000000000..db83fb7e9
--- /dev/null
+++ b/crates/networking/src/discovery/peers.rs
@@ -0,0 +1,286 @@
+use std::{
+    collections::HashSet,
+    sync::Arc,
+    time::{Duration, Instant, SystemTime},
+};
+
+use crate::InstanceMsgPublicKey;
+use dashmap::{DashMap, DashSet};
+use libp2p::{core::Multiaddr, identify, PeerId};
+use tokio::sync::broadcast;
+use tracing::debug;
+
+/// Information about a peer's connection and behavior
+#[derive(Clone, Debug)]
+pub struct PeerInfo {
+    /// Known addresses for the peer
+    pub addresses: HashSet<Multiaddr>,
+    /// Information from the identify protocol
+    pub identify_info: Option<identify::Info>,
+    /// When the peer was last seen
+    pub last_seen: SystemTime,
+    /// Latest ping latency
+    pub ping_latency: Option<Duration>,
+    /// Number of successful protocol interactions
+    pub successes: u32,
+    /// Number of failed protocol interactions
+    pub failures: u32,
+    /// Average response time for protocol requests
+    pub average_response_time: Option<Duration>,
+}
+
+impl Default for PeerInfo {
+    fn default() -> Self {
+        Self {
+            addresses: HashSet::new(),
+            identify_info: None,
+            last_seen: SystemTime::now(),
+            ping_latency: None,
+            successes: 0,
+            failures: 0,
+            average_response_time: None,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub enum PeerEvent {
+    /// A peer was added or updated
+    PeerUpdated { peer_id: PeerId, info: PeerInfo },
+    /// A peer was removed
+    PeerRemoved { peer_id: PeerId, reason: String },
+    /// A peer was banned
+    PeerBanned {
+        peer_id: PeerId,
+        reason: String,
+        expires_at: Option<Instant>,
+    },
+    /// A peer was unbanned
+    PeerUnbanned { peer_id: PeerId },
+}
+
+pub struct PeerManager {
+    /// Active peers and their information
+    peers: DashMap<PeerId, PeerInfo>,
+    /// Verified peers from completed handshakes
+    verified_peers: DashSet<PeerId>,
+    /// Handshake keys to peer ids
+    public_keys_to_peer_ids: Arc<DashMap<InstanceMsgPublicKey, PeerId>>,
+    /// Banned peers with optional expiration time
+    banned_peers: DashMap<PeerId, Option<Instant>>,
+    /// Allowed public keys
+    whitelisted_keys: DashSet<InstanceMsgPublicKey>,
+    /// Event sender for peer updates
+    event_tx: broadcast::Sender<PeerEvent>,
+}
+
+impl Default for PeerManager {
+    fn default() -> Self {
+        Self::new(HashSet::default())
+    }
+}
+
+impl PeerManager {
+    #[must_use]
+    pub fn new(whitelisted_keys: HashSet<InstanceMsgPublicKey>) -> Self {
+        let (event_tx, _) = broadcast::channel(100);
+        Self {
+            peers: DashMap::default(),
+            banned_peers: DashMap::default(),
+            verified_peers: DashSet::default(),
+            public_keys_to_peer_ids: Arc::new(DashMap::default()),
+            whitelisted_keys: DashSet::from_iter(whitelisted_keys),
+            event_tx,
+        }
+    }
+
+    pub fn update_whitelisted_keys(&self, keys: HashSet<InstanceMsgPublicKey>) {
+        self.whitelisted_keys.clear();
+        for key in keys {
+            self.whitelisted_keys.insert(key);
+        }
+    }
+
+    #[must_use]
+    pub fn is_key_whitelisted(&self, key: &InstanceMsgPublicKey) -> bool {
+        self.whitelisted_keys.contains(key)
+    }
+
+    pub fn handle_nonwhitelisted_peer(&self, peer: &PeerId) {
+        self.remove_peer(peer, "non-whitelisted");
+        self.ban_peer(*peer, "non-whitelisted", None);
+    }
+
+    /// Get a subscription to peer events
+    #[must_use]
+    pub fn subscribe(&self) -> broadcast::Receiver<PeerEvent> {
+        self.event_tx.subscribe()
+    }
+
+    /// Update or add peer information
+    pub fn update_peer(&self, peer_id: PeerId, mut info: PeerInfo) {
+        // Update last seen time
+        info.last_seen = SystemTime::now();
+
+        // Insert or update peer info
+        self.peers.insert(peer_id, info.clone());
+
+        // Emit event
+        let _ = self.event_tx.send(PeerEvent::PeerUpdated { peer_id, info });
+    }
+
+    /// Remove a peer
+    pub fn remove_peer(&self, peer_id: &PeerId, reason: impl Into<String>) {
+        if self.peers.remove(peer_id).is_some() {
+            let reason = reason.into();
+            debug!(%peer_id, %reason, "removed peer");
+            let _ = self.event_tx.send(PeerEvent::PeerRemoved {
+                peer_id: *peer_id,
+                reason,
+            });
+        }
+    }
+
+    /// Verify a peer
+    pub fn verify_peer(&self, peer_id: &PeerId) {
+        self.verified_peers.insert(*peer_id);
+    }
+
+    /// Check if a peer is verified
+    #[must_use]
+    pub fn is_peer_verified(&self, peer_id: &PeerId) -> bool {
+        self.verified_peers.contains(peer_id)
+    }
+
+    /// Ban a peer with optional expiration
+    pub fn ban_peer(&self, peer_id: PeerId, reason: impl Into<String>, duration: Option<Duration>) {
+        let expires_at = duration.map(|d| Instant::now() + d);
+
+        // Remove from active peers
+        self.remove_peer(&peer_id, "banned");
+
+        // Add to banned peers
+        self.banned_peers.insert(peer_id, expires_at);
+
+        let reason = reason.into();
+        debug!(%peer_id, %reason, "banned peer");
+        let _ = self.event_tx.send(PeerEvent::PeerBanned {
+            peer_id,
+            reason,
+            expires_at,
+        });
+    }
+
+    /// Bans a peer with the default duration (`1h`)
+    pub fn ban_peer_with_default_duration(&self, peer: PeerId, reason: impl Into<String>) {
+        const BAN_PEER_DURATION: Duration = Duration::from_secs(60 * 60); // 1h
+        self.ban_peer(peer, reason, Some(BAN_PEER_DURATION));
+    }
+
+    /// Unban a peer
+    pub fn unban_peer(&self, peer_id: &PeerId) {
+        if self.banned_peers.remove(peer_id).is_some() {
+            debug!(%peer_id, "unbanned peer");
+            let _ = self
+                .event_tx
+                .send(PeerEvent::PeerUnbanned { peer_id: *peer_id });
+        }
+    }
+
+    /// Check if a peer is banned
+    #[must_use]
+    pub fn is_banned(&self, peer_id: &PeerId) -> bool {
+        self.banned_peers.contains_key(peer_id)
+    }
+
+    /// Log a successful interaction with a peer
+    pub fn log_success(&self, peer_id: &PeerId, duration: Duration) {
+        if let Some(mut info) = self.peers.get_mut(peer_id) {
+            info.successes += 1;
+            update_average_time(&mut info, duration);
+            self.update_peer(*peer_id, info.clone());
+        }
+    }
+
+    /// Log a failed interaction with a peer
+    pub fn log_failure(&self, peer_id: &PeerId, duration: Duration) {
+        if let Some(mut info) = self.peers.get_mut(peer_id) {
+            info.failures += 1;
+            update_average_time(&mut info, duration);
+            self.update_peer(*peer_id, info.clone());
+        }
+    }
+
+    /// Get peer information
+    #[must_use]
+    pub fn get_peer_info(&self, peer_id: &PeerId) -> Option<PeerInfo> {
+        self.peers.get(peer_id).map(|info| info.value().clone())
+    }
+
+    /// Get all active peers
+    #[must_use]
+    pub fn get_peers(&self) -> DashMap<PeerId, PeerInfo> {
+        self.peers.clone()
+    }
+
+    /// Get number of active peers
+    #[must_use]
+    pub fn peer_count(&self) -> usize {
+        self.peers.len()
+    }
+
+    /// Start the background task to clean up expired bans
+    pub async fn run_ban_cleanup(self: Arc<Self>) {
+        loop {
+            let now = Instant::now();
+            let mut to_unban = Vec::new();
+
+            // Find expired bans
+            let banned_peers = self.banned_peers.clone().into_read_only();
+            for (peer_id, expires_at) in banned_peers.iter() {
+                if let Some(expiry) = expires_at {
+                    if now >= *expiry {
+                        to_unban.push(*peer_id);
+                    }
+                }
+            }
+
+            // Unban expired peers
+            for peer_id in to_unban {
+                self.unban_peer(&peer_id);
+            }
+
+            tokio::time::sleep(Duration::from_secs(60)).await;
+        }
+    }
+
+    /// Add a peer id to the public key to peer id map after verifying handshake
+    pub fn add_peer_id_to_public_key(&self, peer_id: &PeerId, public_key: &InstanceMsgPublicKey) {
+        self.public_keys_to_peer_ids.insert(*public_key, *peer_id);
+    }
+
+    /// Remove a peer id from the public key to peer id map
+    pub fn remove_peer_id_from_public_key(&self, peer_id: &PeerId) {
+        self.public_keys_to_peer_ids.retain(|_, id| id != peer_id);
+    }
+
+    #[must_use]
+    pub fn get_peer_id_from_public_key(&self, public_key: &InstanceMsgPublicKey) -> Option<PeerId> {
+        self.public_keys_to_peer_ids.get(public_key).map(|id| *id)
+    }
+}
+
+/// Update the average response time for a peer
+fn update_average_time(info: &mut PeerInfo, duration: Duration) {
+    const ALPHA: u32 = 5; // Smoothing factor for the moving average
+
+    if info.average_response_time.is_none() {
+        info.average_response_time = Some(duration);
+    } else if duration < info.average_response_time.unwrap() {
+        let delta = (info.average_response_time.unwrap() - duration) / ALPHA;
+        info.average_response_time = Some(info.average_response_time.unwrap() - delta);
+    } else {
+        let delta = (duration - info.average_response_time.unwrap()) / ALPHA;
+        info.average_response_time = Some(info.average_response_time.unwrap() + delta);
+    }
+}
diff --git a/crates/networking/src/error.rs b/crates/networking/src/error.rs
index 9c4d13ead..b424d28f0 100644
--- a/crates/networking/src/error.rs
+++ b/crates/networking/src/error.rs
@@ -1,3 +1,7 @@
+use crate::NetworkEvent;
+
+pub type Result<T, E = Error> = core::result::Result<T, E>;
+
 #[derive(Debug, thiserror::Error)]
 pub enum Error {
     #[error("Network error: {0}")]
@@ -32,6 +36,9 @@ pub enum Error {
     #[error("No network found")]
     NoNetworkFound,
 
+    #[error("Kademlia is not activated")]
+    KademliaNotActivated,
+
     #[error("Other error: {0}")]
     Other(String),
 
@@ -40,16 +47,33 @@ pub enum Error {
     Io(#[from] std::io::Error),
 
     // libp2p compat
+    #[error(transparent)]
+    InvalidProtocol(#[from] libp2p::swarm::InvalidProtocol),
+
+    #[error(transparent)]
+    NoKnownPeers(#[from] libp2p::kad::NoKnownPeers),
+
+    #[error(transparent)]
     Dial(#[from] libp2p::swarm::DialError),
+
+    #[error(transparent)]
     Noise(#[from] libp2p::noise::Error),
+
+    #[error(transparent)]
     Behaviour(#[from] libp2p::BehaviourBuilderError),
+
+    #[error(transparent)]
     Subscription(#[from] libp2p::gossipsub::SubscriptionError),
+
+    #[error(transparent)]
     TransportIo(#[from] libp2p::TransportError<std::io::Error>),
+
+    #[error(transparent)]
     Multiaddr(#[from] libp2p::multiaddr::Error),
+
+    #[error(transparent)]
+    TokioSendError(#[from] tokio::sync::mpsc::error::SendError<NetworkEvent>),
+
+    #[error(transparent)]
+    CrossbeamSendError(#[from] crossbeam_channel::SendError),
 }
diff --git a/crates/networking/src/gossip.rs b/crates/networking/src/gossip.rs
deleted file mode 100644
index f63465c41..000000000
--- a/crates/networking/src/gossip.rs
+++ /dev/null
@@ -1,430 +0,0 @@
-#![allow(
-    missing_debug_implementations,
-    unused_results,
-    clippy::module_name_repetitions,
-    clippy::exhaustive_enums
-)]
-
-use crate::error::Error;
-use crate::key_types::{GossipMsgKeyPair, GossipMsgPublicKey, GossipSignedMsgSignature};
-use async_trait::async_trait;
-use gadget_crypto::hashing::blake3_256;
-use gadget_std::collections::BTreeMap;
-use gadget_std::string::ToString;
-use gadget_std::sync::atomic::AtomicUsize;
-use gadget_std::sync::Arc;
-use libp2p::gossipsub::IdentTopic;
-use libp2p::kad::store::MemoryStore;
-use libp2p::{
-    gossipsub, mdns, request_response, swarm::NetworkBehaviour, swarm::SwarmEvent, PeerId,
-};
-use lru_mem::LruCache;
-use serde::{Deserialize, Serialize};
-use tokio::sync::mpsc::UnboundedSender;
-use tokio::sync::{Mutex, RwLock};
-
-use crate::networking::{Network, ParticipantInfo, ProtocolMessage};
-use gadget_std as std;
-use gadget_std::{boxed::Box, format, string::String, vec::Vec};
-use std::vec;
-
-/// Maximum allowed size for a Signed Message.
-pub const MAX_MESSAGE_SIZE: usize = 16 * 1024 * 1024;
-
-// We create a custom network behaviour that combines Gossipsub and Mdns.
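// Usage sketch (illustrative only, not part of the patch): how the new `DiscoveryConfig`
// builder and `PeerManager` introduced above might be wired together. The crate path
// `gadget_networking`, the empty key whitelist, and the chosen builder values are
// assumptions for this example; adjust them to the crate's actual re-exports.
use std::{collections::HashSet, sync::Arc};

use gadget_networking::discovery::{config::DiscoveryConfig, PeerManager};
use libp2p::identity::Keypair;

fn discovery_usage_sketch() -> Result<(), Box<dyn std::error::Error>> {
    let keypair = Keypair::generate_ed25519();

    // Build the discovery behaviour: mDNS for local development,
    // Kademlia for wide-area peer discovery.
    let behaviour = DiscoveryConfig::new(keypair.public(), "my-blueprint")
        .protocol_version("my-blueprint/1.0.0")
        .target_peer_count(16)
        .mdns(true)
        .kademlia(true)
        .build()?;

    // Track peer state and subscribe to peer lifecycle events
    // (PeerUpdated / PeerRemoved / PeerBanned / PeerUnbanned).
    let peer_manager = Arc::new(PeerManager::new(HashSet::new()));
    let _peer_events = peer_manager.subscribe();

    // `behaviour` would then be composed into the swarm alongside the
    // blueprint protocol behaviour.
    drop(behaviour);
    Ok(())
}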
-#[derive(NetworkBehaviour)] -pub struct MyBehaviour { - pub gossipsub: gossipsub::Behaviour, - pub mdns: mdns::tokio::Behaviour, - pub p2p: request_response::cbor::Behaviour, - pub identify: libp2p::identify::Behaviour, - pub kadmelia: libp2p::kad::Behaviour, - pub dcutr: libp2p::dcutr::Behaviour, - pub relay: libp2p::relay::Behaviour, - pub ping: libp2p::ping::Behaviour, -} - -pub type InboundMapping = (IdentTopic, UnboundedSender>, Arc); - -pub struct NetworkServiceWithoutSwarm<'a> { - pub inbound_mapping: &'a [InboundMapping], - pub public_key_to_libp2p_id: Arc>>, - pub secret_key: &'a GossipMsgKeyPair, - pub connected_peers: Arc, - pub span: tracing::Span, - pub my_id: PeerId, -} - -impl<'a> NetworkServiceWithoutSwarm<'a> { - pub(crate) fn with_swarm( - &'a self, - swarm: &'a mut libp2p::Swarm, - ) -> NetworkService<'a> { - NetworkService { - swarm, - inbound_mapping: self.inbound_mapping, - public_key_to_libp2p_id: &self.public_key_to_libp2p_id, - secret_key: self.secret_key, - connected_peers: self.connected_peers.clone(), - span: &self.span, - my_id: self.my_id, - } - } -} - -pub struct NetworkService<'a> { - pub swarm: &'a mut libp2p::Swarm, - pub inbound_mapping: &'a [InboundMapping], - pub public_key_to_libp2p_id: &'a Arc>>, - pub connected_peers: Arc, - pub secret_key: &'a GossipMsgKeyPair, - pub span: &'a tracing::Span, - pub my_id: PeerId, -} - -impl NetworkService<'_> { - /// Handle local requests that are meant to be sent to the network. - pub(crate) fn handle_intra_node_payload(&mut self, msg: IntraNodePayload) { - let _enter = self.span.enter(); - match (msg.message_type, msg.payload) { - (MessageType::Broadcast, GossipOrRequestResponse::Gossip(payload)) => { - let gossip_message = bincode::serialize(&payload).expect("Should serialize"); - if let Err(e) = self - .swarm - .behaviour_mut() - .gossipsub - .publish(msg.topic, gossip_message) - { - gadget_logging::error!("Publish error: {e:?}"); - } - } - - (MessageType::P2P(peer_id), GossipOrRequestResponse::Request(req)) => { - // Send the outer payload in order to attach the topic to it - // "Requests are sent using Behaviour::send_request and the responses - // received as Message::Response via Event::Message." - self.swarm.behaviour_mut().p2p.send_request(&peer_id, req); - } - (MessageType::Broadcast, GossipOrRequestResponse::Request(_)) => { - gadget_logging::error!("Broadcasting a request is not supported"); - } - (MessageType::Broadcast, GossipOrRequestResponse::Response(_)) => { - gadget_logging::error!("Broadcasting a response is not supported"); - } - (MessageType::P2P(_), GossipOrRequestResponse::Gossip(_)) => { - gadget_logging::error!("P2P message should be a request or response"); - } - (MessageType::P2P(_), GossipOrRequestResponse::Response(_)) => { - // TODO: Send the response to the peer. 
- } - } - } - - /// Handle inbound events from the networking layer - #[allow(clippy::too_many_lines)] - pub(crate) async fn handle_swarm_event(&mut self, event: SwarmEvent) { - use MyBehaviourEvent::{Dcutr, Gossipsub, Identify, Kadmelia, Mdns, P2p, Ping, Relay}; - use SwarmEvent::{ - Behaviour, ConnectionClosed, ConnectionEstablished, Dialing, ExpiredListenAddr, - ExternalAddrConfirmed, ExternalAddrExpired, IncomingConnection, - IncomingConnectionError, ListenerClosed, ListenerError, NewExternalAddrCandidate, - NewExternalAddrOfPeer, NewListenAddr, OutgoingConnectionError, - }; - let _enter = self.span.enter(); - match event { - Behaviour(P2p(event)) => { - self.handle_p2p(event).await; - } - Behaviour(Gossipsub(event)) => { - self.handle_gossip(event).await; - } - Behaviour(Mdns(event)) => { - self.handle_mdns_event(event).await; - } - Behaviour(Identify(event)) => { - self.handle_identify_event(event).await; - } - Behaviour(Kadmelia(event)) => { - gadget_logging::trace!("Kadmelia event: {event:?}"); - } - Behaviour(Dcutr(event)) => { - self.handle_dcutr_event(event).await; - } - Behaviour(Relay(event)) => { - self.handle_relay_event(event).await; - } - Behaviour(Ping(event)) => { - self.handle_ping_event(event).await; - } - - NewListenAddr { - address, - listener_id, - } => { - gadget_logging::trace!("{listener_id} has a new address: {address}"); - } - ConnectionEstablished { - peer_id, - num_established, - .. - } => { - self.handle_connection_established(peer_id, num_established.get()) - .await; - } - ConnectionClosed { - peer_id, - num_established, - cause, - .. - } => { - self.handle_connection_closed(peer_id, num_established, cause) - .await; - } - IncomingConnection { - connection_id, - local_addr, - send_back_addr, - } => { - self.handle_incoming_connection(connection_id, local_addr, send_back_addr) - .await; - } - IncomingConnectionError { - connection_id, - local_addr, - send_back_addr, - error, - } => { - self.handle_incoming_connection_error( - connection_id, - local_addr, - send_back_addr, - error, - ) - .await; - } - OutgoingConnectionError { - connection_id, - peer_id, - error, - } => { - self.handle_outgoing_connection_error(connection_id, peer_id, error) - .await; - } - ExpiredListenAddr { - listener_id, - address, - } => { - gadget_logging::trace!("{listener_id} has an expired address: {address}"); - } - ListenerClosed { - listener_id, - addresses, - reason, - } => { - gadget_logging::trace!( - "{listener_id} on {addresses:?} has been closed: {reason:?}" - ); - } - ListenerError { listener_id, error } => { - gadget_logging::error!("{listener_id} has an error: {error}"); - } - Dialing { - peer_id, - connection_id, - } => { - gadget_logging::trace!( - "Dialing peer: {peer_id:?} with connection_id: {connection_id}" - ); - } - NewExternalAddrCandidate { address } => { - gadget_logging::trace!("New external address candidate: {address}"); - } - ExternalAddrConfirmed { address } => { - gadget_logging::trace!("External address confirmed: {address}"); - } - ExternalAddrExpired { address } => { - gadget_logging::trace!("External address expired: {address}"); - } - NewExternalAddrOfPeer { peer_id, address } => { - gadget_logging::trace!( - "New external address of peer: {peer_id} with address: {address}" - ); - } - unknown => { - gadget_logging::warn!("Unknown swarm event: {unknown:?}"); - } - } - } -} - -pub struct GossipHandle { - pub topic: IdentTopic, - pub tx_to_outbound: UnboundedSender, - pub rx_from_inbound: Arc>>>, - pub connected_peers: Arc, - pub 
public_key_to_libp2p_id: Arc>>, - pub recent_messages: parking_lot::Mutex>, - pub my_id: GossipMsgPublicKey, -} - -impl GossipHandle { - #[must_use] - pub fn connected_peers(&self) -> usize { - self.connected_peers - .load(gadget_std::sync::atomic::Ordering::Relaxed) - } - - #[must_use] - pub fn topic(&self) -> IdentTopic { - self.topic.clone() - } - - /// Returns an ordered vector of public keys of the peers that are connected to the gossipsub topic. - pub async fn peers(&self) -> Vec { - self.public_key_to_libp2p_id - .read() - .await - .keys() - .copied() - .collect() - } -} - -pub struct IntraNodePayload { - topic: IdentTopic, - payload: GossipOrRequestResponse, - message_type: MessageType, -} - -impl gadget_std::fmt::Debug for IntraNodePayload { - fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { - f.debug_struct("IntraNodePayload") - .field("topic", &self.topic) - .finish_non_exhaustive() - } -} - -#[non_exhaustive] -#[derive(Serialize, Deserialize, Debug)] -pub enum GossipOrRequestResponse { - Gossip(GossipMessage), - Request(MyBehaviourRequest), - Response(MyBehaviourResponse), -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GossipMessage { - pub topic: String, - pub raw_payload: Vec, -} - -#[non_exhaustive] -#[derive(Serialize, Deserialize, Debug)] -pub enum MyBehaviourRequest { - Handshake { - public_key: GossipMsgPublicKey, - signature: GossipSignedMsgSignature, - }, - Message { - topic: String, - raw_payload: Vec, - }, -} - -#[non_exhaustive] -#[derive(Serialize, Deserialize, Debug)] -pub enum MyBehaviourResponse { - Handshaked { - public_key: GossipMsgPublicKey, - signature: GossipSignedMsgSignature, - }, - MessageHandled, -} - -enum MessageType { - Broadcast, - P2P(PeerId), -} - -#[async_trait] -impl Network for GossipHandle { - async fn next_message(&self) -> Option { - loop { - let mut lock = self - .rx_from_inbound - .try_lock() - .expect("There should be only a single caller for `next_message`"); - - let message_bytes = lock.recv().await?; - drop(lock); - match bincode::deserialize::(&message_bytes) { - Ok(message) => { - let hash = blake3_256(&message_bytes); - let mut map = self.recent_messages.lock(); - if map - .insert(hash, ()) - .expect("Should not exceed memory limit (rx)") - .is_none() - { - return Some(message); - } - } - Err(e) => { - gadget_logging::error!("Failed to deserialize message (gossip): {e}"); - } - } - } - } - - async fn send_message(&self, mut message: ProtocolMessage) -> Result<(), Error> { - message.sender.public_key = Some(self.my_id); - let message_type = if let Some(ParticipantInfo { - public_key: Some(to), - .. - }) = message.recipient - { - let pub_key_to_libp2p_id = self.public_key_to_libp2p_id.read().await; - gadget_logging::trace!("Handshake count: {}", pub_key_to_libp2p_id.len()); - let libp2p_id = pub_key_to_libp2p_id - .get(&to) - .copied() - .ok_or_else(|| { - Error::NetworkError(format!( - "No libp2p ID found for crypto public key: {:?}. No handshake happened? 
Total handshakes: {}", - to, pub_key_to_libp2p_id.len(), - )) - })?; - - MessageType::P2P(libp2p_id) - } else { - MessageType::Broadcast - }; - - let raw_payload = - bincode::serialize(&message).map_err(|err| Error::MessagingError(err.to_string()))?; - let payload_inner = match message_type { - MessageType::Broadcast => GossipOrRequestResponse::Gossip(GossipMessage { - topic: self.topic.to_string(), - raw_payload, - }), - MessageType::P2P(_) => GossipOrRequestResponse::Request(MyBehaviourRequest::Message { - topic: self.topic.to_string(), - raw_payload, - }), - }; - - let payload = IntraNodePayload { - topic: self.topic.clone(), - payload: payload_inner, - message_type, - }; - - self.tx_to_outbound - .send(payload) - .map_err(|e| Error::NetworkError(format!("Failed to send intra-node payload: {e}"))) - } - - fn public_id(&self) -> GossipMsgPublicKey { - self.my_id - } -} diff --git a/crates/crypto/sp-core/src/sp_core_util.rs b/crates/networking/src/handlers/blueprint_protocol.rs similarity index 100% rename from crates/crypto/sp-core/src/sp_core_util.rs rename to crates/networking/src/handlers/blueprint_protocol.rs diff --git a/crates/networking/src/handlers/connections.rs b/crates/networking/src/handlers/connections.rs deleted file mode 100644 index 0f686c31e..000000000 --- a/crates/networking/src/handlers/connections.rs +++ /dev/null @@ -1,140 +0,0 @@ -#![allow(unused_results, clippy::used_underscore_binding)] - -use crate::gossip::{MyBehaviourRequest, NetworkService}; -use crate::key_types::Curve; -use gadget_crypto::KeyType; -use gadget_std as std; -use itertools::Itertools; -use libp2p::PeerId; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self))] - pub(crate) async fn handle_connection_established( - &mut self, - peer_id: PeerId, - _num_established: u32, - ) { - gadget_logging::debug!("Connection established"); - if !self - .public_key_to_libp2p_id - .read() - .await - .iter() - .any(|(_, id)| id == &peer_id) - { - let my_peer_id = *self.swarm.local_peer_id(); - let msg = my_peer_id.to_bytes(); - match ::sign_with_secret(&mut self.secret_key.clone(), &msg) { - Ok(signature) => { - let handshake = MyBehaviourRequest::Handshake { - public_key: self.secret_key.public(), - signature, - }; - self.swarm - .behaviour_mut() - .p2p - .send_request(&peer_id, handshake); - self.swarm - .behaviour_mut() - .gossipsub - .add_explicit_peer(&peer_id); - gadget_logging::info!("Sent handshake from {my_peer_id} to {peer_id}"); - } - Err(e) => { - gadget_logging::error!("Failed to sign handshake: {e}"); - } - } - } - } - - #[tracing::instrument(skip(self))] - pub(crate) async fn handle_connection_closed( - &mut self, - peer_id: PeerId, - num_established: u32, - _cause: Option, - ) { - gadget_logging::trace!("Connection closed"); - if num_established == 0 { - self.swarm - .behaviour_mut() - .gossipsub - .remove_explicit_peer(&peer_id); - let mut pub_key_to_libp2p_id = self.public_key_to_libp2p_id.write().await; - let len_initial = 0; - pub_key_to_libp2p_id.retain(|_, id| *id != peer_id); - if pub_key_to_libp2p_id.len() == len_initial + 1 { - self.connected_peers - .fetch_sub(1, std::sync::atomic::Ordering::Relaxed); - } - } - } - - #[tracing::instrument(skip(self))] - pub(crate) async fn handle_incoming_connection( - &mut self, - _connection_id: libp2p::swarm::ConnectionId, - _local_addr: libp2p::Multiaddr, - _send_back_addr: libp2p::Multiaddr, - ) { - gadget_logging::trace!("Incoming connection"); - } - - #[tracing::instrument(skip(self))] - async fn handle_outgoing_connection( - &mut self, 
- peer_id: PeerId, - _connection_id: libp2p::swarm::ConnectionId, - ) { - gadget_logging::trace!("Outgoing connection to peer: {peer_id}"); - } - - #[tracing::instrument(skip(self, error))] - pub(crate) async fn handle_incoming_connection_error( - &mut self, - _connection_id: libp2p::swarm::ConnectionId, - _local_addr: libp2p::Multiaddr, - _send_back_addr: libp2p::Multiaddr, - error: libp2p::swarm::ListenError, - ) { - gadget_logging::error!("Incoming connection error: {error}"); - } - - #[tracing::instrument(skip(self, error))] - pub(crate) async fn handle_outgoing_connection_error( - &mut self, - _connection_id: libp2p::swarm::ConnectionId, - _peer_id: Option, - error: libp2p::swarm::DialError, - ) { - if let libp2p::swarm::DialError::Transport(addrs) = error { - let read = self.public_key_to_libp2p_id.read().await; - for (addr, err) in addrs { - if let Some(peer_id) = get_peer_id_from_multiaddr(&addr) { - if !read.values().contains(&peer_id) { - gadget_logging::warn!( - "Outgoing connection error to peer: {peer_id} at {addr}: {err}", - peer_id = peer_id, - addr = addr, - err = err - ); - } - } - } - } else { - gadget_logging::error!("Outgoing connection error to peer: {error}"); - } - } -} - -fn get_peer_id_from_multiaddr(addr: &libp2p::Multiaddr) -> Option { - addr.iter() - .find_map(|proto| { - if let libp2p::multiaddr::Protocol::P2p(peer_id) = proto { - Some(Some(peer_id)) - } else { - None - } - }) - .flatten() -} diff --git a/crates/networking/src/handlers/dcutr.rs b/crates/networking/src/handlers/dcutr.rs deleted file mode 100644 index f0fd3497f..000000000 --- a/crates/networking/src/handlers/dcutr.rs +++ /dev/null @@ -1,8 +0,0 @@ -use crate::gossip::NetworkService; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - pub async fn handle_dcutr_event(&mut self, event: libp2p::dcutr::Event) { - gadget_logging::trace!("DCUTR event: {event:?}"); - } -} diff --git a/crates/networking/src/handlers/discovery.rs b/crates/networking/src/handlers/discovery.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/crates/networking/src/handlers/discovery.rs @@ -0,0 +1 @@ + diff --git a/crates/networking/src/handlers/gossip.rs b/crates/networking/src/handlers/gossip.rs deleted file mode 100644 index ae4c7d559..000000000 --- a/crates/networking/src/handlers/gossip.rs +++ /dev/null @@ -1,117 +0,0 @@ -#![allow(unused_results)] - -use crate::gossip::{GossipMessage, NetworkService}; -use gadget_std::string::ToString; -use gadget_std::sync::atomic::AtomicUsize; -use gadget_std::sync::Arc; -use libp2p::gossipsub::{Event, TopicHash}; -use libp2p::{gossipsub, PeerId}; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - pub(crate) async fn handle_gossip(&mut self, event: gossipsub::Event) { - let with_connected_peers = |topic: &TopicHash, f: fn(&Arc)| { - let maybe_mapping = self - .inbound_mapping - .iter() - .find(|r| r.0.to_string() == topic.to_string()); - match maybe_mapping { - Some((_, _, connected_peers)) => { - f(connected_peers); - true - } - None => false, - } - }; - match event { - Event::Message { - propagation_source, - message_id, - message, - } => { - self.handle_gossip_message(propagation_source, message_id, message) - .await; - } - Event::Subscribed { peer_id, topic } => { - let added = with_connected_peers(&topic, |_connected_peers| { - // Code commented out because each peer needs to do a request-response - // direct P2P handshake, which is where the connected_peers counter is - // incremented. 
Adding here will just add twice, which is undesirable. - // connected_peers.fetch_add(1, gadget_std::sync::atomic::Ordering::Relaxed); - }); - if added { - gadget_logging::trace!("{peer_id} subscribed to {topic}"); - } else { - gadget_logging::error!("{peer_id} subscribed to unknown topic: {topic}"); - } - } - Event::Unsubscribed { peer_id, topic } => { - let removed = with_connected_peers(&topic, |_connected_peers| { - // Code commented out because each peer needs to do a request-response - // direct P2P handshake, which is where the connected_peers counter is - // decremented. Subbing here will just sub twice, which is undesirable. - // connected_peers.fetch_sub(1, gadget_std::sync::atomic::Ordering::Relaxed); - }); - if removed { - gadget_logging::trace!("{peer_id} unsubscribed from {topic}"); - } else { - gadget_logging::error!("{peer_id} unsubscribed from unknown topic: {topic}"); - } - } - Event::GossipsubNotSupported { peer_id } => { - gadget_logging::trace!("{peer_id} does not support gossipsub!"); - } - Event::SlowPeer { - peer_id, - failed_messages: _, - } => { - gadget_logging::error!("{peer_id} wasn't able to download messages in time!"); - } - } - } - - #[tracing::instrument( - skip(self, message), - fields( - %_message_id, - %_propagation_source, - source = ?message.source - ) - )] - async fn handle_gossip_message( - &mut self, - _propagation_source: PeerId, - _message_id: gossipsub::MessageId, - message: gossipsub::Message, - ) { - let Some(origin) = message.source else { - gadget_logging::error!("Got message from unknown peer"); - return; - }; - - // Reject messages from self - if origin == self.my_id { - return; - } - - gadget_logging::trace!("Got message from peer: {origin}"); - match bincode::deserialize::(&message.data) { - Ok(GossipMessage { topic, raw_payload }) => { - if let Some((_, tx, _)) = self - .inbound_mapping - .iter() - .find(|r| r.0.to_string() == topic) - { - if let Err(e) = tx.send(raw_payload) { - gadget_logging::warn!("Failed to send message to worker: {e}"); - } - } else { - gadget_logging::error!("No registered worker for topic: {topic}!"); - } - } - Err(e) => { - gadget_logging::error!("Failed to deserialize message (handlers/gossip): {e}"); - } - } - } -} diff --git a/crates/networking/src/handlers/identify.rs b/crates/networking/src/handlers/identify.rs deleted file mode 100644 index 017be564b..000000000 --- a/crates/networking/src/handlers/identify.rs +++ /dev/null @@ -1,41 +0,0 @@ -use crate::gossip::NetworkService; -use gadget_std::format; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - pub(crate) async fn handle_identify_event(&mut self, event: libp2p::identify::Event) { - use libp2p::identify::Event::{Error, Pushed, Received, Sent}; - match event { - Received { peer_id, info, .. } => { - // TODO: Verify the peer info, for example the protocol version, agent version, etc. - let info_lines = [ - format!("Protocol Version: {}", info.protocol_version), - format!("Agent Version: {}", info.agent_version), - format!("Supported Protocols: {:?}", info.protocols), - ]; - let info_lines = info_lines.join(", "); - gadget_logging::trace!( - "Received identify event from peer: {peer_id} with info: {info_lines}" - ); - self.swarm.add_external_address(info.observed_addr); - } - Sent { peer_id, .. } => { - gadget_logging::trace!("Sent identify event to peer: {peer_id}"); - } - Pushed { peer_id, info, .. 
} => { - let info_lines = [ - format!("Protocol Version: {}", info.protocol_version), - format!("Agent Version: {}", info.agent_version), - format!("Supported Protocols: {:?}", info.protocols), - ]; - let info_lines = info_lines.join(", "); - gadget_logging::trace!( - "Pushed identify event to peer: {peer_id} with info: {info_lines}" - ); - } - Error { peer_id, error, .. } => { - gadget_logging::error!("Identify error from peer: {peer_id} with error: {error}"); - } - } - } -} diff --git a/crates/networking/src/handlers/kadmelia.rs b/crates/networking/src/handlers/kadmelia.rs deleted file mode 100644 index fec6a8a39..000000000 --- a/crates/networking/src/handlers/kadmelia.rs +++ /dev/null @@ -1,9 +0,0 @@ -use crate::gossip::NetworkService; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - async fn handle_kadmelia_event(&mut self, event: libp2p::kad::Event) { - // TODO: Handle kadmelia events - gadget_logging::trace!("Kadmelia event: {event:?}"); - } -} diff --git a/crates/networking/src/handlers/mdns.rs b/crates/networking/src/handlers/mdns.rs deleted file mode 100644 index 9bdd1ac8d..000000000 --- a/crates/networking/src/handlers/mdns.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::gossip::NetworkService; -use libp2p::mdns; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - pub(crate) async fn handle_mdns_event(&mut self, event: mdns::Event) { - use mdns::Event::{Discovered, Expired}; - match event { - Discovered(list) => { - for (peer_id, multiaddr) in list { - gadget_logging::trace!("discovered a new peer: {peer_id} on {multiaddr}"); - self.swarm - .behaviour_mut() - .gossipsub - .add_explicit_peer(&peer_id); - if let Err(err) = self.swarm.dial(multiaddr) { - gadget_logging::error!("Failed to dial peer: {err}"); - } - } - } - Expired(list) => { - for (peer_id, multiaddr) in list { - gadget_logging::trace!("discover peer has expired: {peer_id} with {multiaddr}"); - self.swarm - .behaviour_mut() - .gossipsub - .remove_explicit_peer(&peer_id); - } - } - } - } -} diff --git a/crates/networking/src/handlers/mod.rs b/crates/networking/src/handlers/mod.rs index 63b39123e..4e5668a5d 100644 --- a/crates/networking/src/handlers/mod.rs +++ b/crates/networking/src/handlers/mod.rs @@ -1,10 +1,3 @@ -#[cfg(not(target_family = "wasm"))] -pub mod connections; -pub mod dcutr; -pub mod gossip; -pub mod identify; -pub mod kadmelia; -pub mod mdns; -pub mod p2p; +pub mod blueprint_protocol; +pub mod discovery; pub mod ping; -pub mod relay; diff --git a/crates/networking/src/handlers/p2p.rs b/crates/networking/src/handlers/p2p.rs deleted file mode 100644 index 7dd5349a9..000000000 --- a/crates/networking/src/handlers/p2p.rs +++ /dev/null @@ -1,205 +0,0 @@ -#![allow(unused_results)] - -use crate::gossip::{MyBehaviourRequest, MyBehaviourResponse, NetworkService}; -use crate::key_types::Curve; -use gadget_crypto::KeyType; -use gadget_std::string::ToString; -use gadget_std::sync::atomic::Ordering; -use libp2p::gossipsub::IdentTopic; -use libp2p::{request_response, PeerId}; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - pub(crate) async fn handle_p2p( - &mut self, - event: request_response::Event, - ) { - use request_response::Event::{InboundFailure, Message, OutboundFailure, ResponseSent}; - match event { - Message { - peer, - message, - connection_id: _, - } => { - gadget_logging::trace!("Received P2P message from: {peer}"); - self.handle_p2p_message(peer, message).await; - } - OutboundFailure { - peer, - request_id, - error, - 
connection_id: _, - } => { - gadget_logging::error!("Failed to send message to peer: {peer} with request_id: {request_id} and error: {error}"); - } - InboundFailure { - peer, - request_id, - error, - connection_id: _, - } => { - gadget_logging::error!("Failed to receive message from peer: {peer} with request_id: {request_id} and error: {error}"); - } - ResponseSent { - peer, - request_id, - connection_id: _, - } => { - gadget_logging::debug!( - "Sent response to peer: {peer} with request_id: {request_id}" - ); - } - } - } - - #[tracing::instrument(skip(self, message))] - async fn handle_p2p_message( - &mut self, - peer: PeerId, - message: request_response::Message, - ) { - use request_response::Message::{Request, Response}; - match message { - Request { - request, - channel, - request_id, - } => { - gadget_logging::trace!( - "Received request with request_id: {request_id} from peer: {peer}" - ); - self.handle_p2p_request(peer, request_id, request, channel) - .await; - } - Response { - response, - request_id, - } => { - gadget_logging::trace!( - "Received response from peer: {peer} with request_id: {request_id}" - ); - self.handle_p2p_response(peer, request_id, response).await; - } - } - } - - #[tracing::instrument(skip(self, req, channel))] - async fn handle_p2p_request( - &mut self, - peer: PeerId, - request_id: request_response::InboundRequestId, - req: MyBehaviourRequest, - channel: request_response::ResponseChannel, - ) { - use crate::gossip::MyBehaviourRequest::{Handshake, Message}; - let result = match req { - Handshake { - public_key, - signature, - } => { - gadget_logging::trace!("Received handshake from peer: {peer}"); - // Verify the signature - let msg = peer.to_bytes(); - let valid = ::verify(&public_key, &msg, &signature); - if !valid { - gadget_logging::warn!("Invalid initial handshake signature from peer: {peer}"); - let _ = self.swarm.disconnect_peer_id(peer); - return; - } - if self - .public_key_to_libp2p_id - .write() - .await - .insert(public_key, peer) - .is_none() - { - let _ = self.connected_peers.fetch_add(1, Ordering::Relaxed); - } - // Send response with our public key - let my_peer_id = self.swarm.local_peer_id(); - let msg = my_peer_id.to_bytes(); - match ::sign_with_secret(&mut self.secret_key.clone(), &msg) { - Ok(signature) => self.swarm.behaviour_mut().p2p.send_response( - channel, - MyBehaviourResponse::Handshaked { - public_key: self.secret_key.public(), - signature, - }, - ), - Err(e) => { - gadget_logging::error!("Failed to sign message: {e}"); - return; - } - } - } - Message { topic, raw_payload } => { - // Reject messages from self - if peer == self.my_id { - return; - } - - let topic = IdentTopic::new(topic); - if let Some((_, tx, _)) = self - .inbound_mapping - .iter() - .find(|r| r.0.to_string() == topic.to_string()) - { - if let Err(e) = tx.send(raw_payload) { - gadget_logging::warn!("Failed to send message to worker: {e}"); - } - } else { - gadget_logging::error!("No registered worker for topic: {topic}!"); - } - self.swarm - .behaviour_mut() - .p2p - .send_response(channel, MyBehaviourResponse::MessageHandled) - } - }; - if result.is_err() { - gadget_logging::error!("Failed to send response for {request_id}"); - } - } - - #[tracing::instrument(skip(self, message))] - async fn handle_p2p_response( - &mut self, - peer: PeerId, - request_id: request_response::OutboundRequestId, - message: MyBehaviourResponse, - ) { - use crate::gossip::MyBehaviourResponse::{Handshaked, MessageHandled}; - match message { - Handshaked { - public_key, - signature, - } 
=> { - gadget_logging::trace!("Received handshake-ack message from peer: {peer}"); - let msg = peer.to_bytes(); - let valid = ::verify(&public_key, &msg, &signature); - if !valid { - gadget_logging::warn!( - "Invalid handshake-acknowledgement signature from peer: {peer}" - ); - // TODO: report this peer. - self.public_key_to_libp2p_id - .write() - .await - .remove(&public_key); - let _ = self.swarm.disconnect_peer_id(peer); - return; - } - if self - .public_key_to_libp2p_id - .write() - .await - .insert(public_key, peer) - .is_none() - { - let _ = self.connected_peers.fetch_add(1, Ordering::Relaxed); - } - } - MessageHandled => {} - } - } -} diff --git a/crates/networking/src/handlers/ping.rs b/crates/networking/src/handlers/ping.rs index af93d89fd..8b1378917 100644 --- a/crates/networking/src/handlers/ping.rs +++ b/crates/networking/src/handlers/ping.rs @@ -1,8 +1 @@ -use crate::gossip::NetworkService; -impl NetworkService<'_> { - #[tracing::instrument(skip(self, _event))] - pub async fn handle_ping_event(&mut self, _event: libp2p::ping::Event) { - //gadget_logging::trace!("Ping event: {event:?}"); - } -} diff --git a/crates/networking/src/handlers/relay.rs b/crates/networking/src/handlers/relay.rs deleted file mode 100644 index 320132086..000000000 --- a/crates/networking/src/handlers/relay.rs +++ /dev/null @@ -1,8 +0,0 @@ -use crate::gossip::NetworkService; - -impl NetworkService<'_> { - #[tracing::instrument(skip(self, event))] - pub async fn handle_relay_event(&mut self, event: libp2p::relay::Event) { - gadget_logging::trace!("Relay event: {event:?}"); - } -} diff --git a/crates/networking/src/lib.rs b/crates/networking/src/lib.rs index 8e9120a17..3656cbd36 100644 --- a/crates/networking/src/lib.rs +++ b/crates/networking/src/lib.rs @@ -1,21 +1,20 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] -pub mod gossip; -pub mod handlers; -pub mod messaging; -pub mod networking; -#[cfg(feature = "round-based-compat")] -pub mod round_based_compat; -#[cfg(feature = "round-based-compat")] -pub use round_based; - +pub mod behaviours; +pub mod blueprint_protocol; +pub mod discovery; pub mod error; -pub mod setup; +pub mod handlers; +pub mod service; +pub mod service_handle; +pub mod types; -/// Unique identifier for a party -pub type UserID = u16; +#[cfg(test)] +mod tests; +pub use gadget_crypto::KeyType; pub use key_types::*; +pub use service::{NetworkConfig, NetworkEvent, NetworkService}; #[cfg(all( feature = "sp-core-ecdsa", @@ -24,8 +23,8 @@ pub use key_types::*; ))] pub mod key_types { pub use gadget_crypto::sp_core::{ - SpEcdsa as Curve, SpEcdsaPair as GossipMsgKeyPair, SpEcdsaPublic as GossipMsgPublicKey, - SpEcdsaSignature as GossipSignedMsgSignature, + SpEcdsa as Curve, SpEcdsaPair as InstanceMsgKeyPair, SpEcdsaPublic as InstanceMsgPublicKey, + SpEcdsaSignature as InstanceSignedMsgSignature, }; } @@ -36,8 +35,8 @@ pub mod key_types { ))] pub mod key_types { pub use gadget_crypto::sp_core::{ - SpSr25519 as Curve, SpSr25519Pair as GossipMsgKeyPair, - SpSr25519Public as GossipMsgPublicKey, SpSr25519Signature as GossipSignedMsgSignature, + SpSr25519 as Curve, SpSr25519Pair as InstanceMsgKeyPair, + SpSr25519Public as InstanceMsgPublicKey, SpSr25519Signature as InstanceSignedMsgSignature, }; } @@ -48,8 +47,8 @@ pub mod key_types { ))] pub mod key_types { pub use gadget_crypto::sp_core::{ - SpEd25519 as Curve, SpEd25519Pair as GossipMsgKeyPair, - SpEd25519Public as GossipMsgPublicKey, SpEd25519Signature as GossipSignedMsgSignature, + SpEd25519 as Curve, SpEd25519Pair as InstanceMsgKeyPair, + 
SpEd25519Public as InstanceMsgPublicKey, SpEd25519Signature as InstanceSignedMsgSignature, }; } @@ -61,8 +60,8 @@ pub mod key_types { pub mod key_types { // Default to k256 ECDSA implementation pub use gadget_crypto::k256::{ - K256Ecdsa as Curve, K256Signature as GossipSignedMsgSignature, - K256SigningKey as GossipMsgKeyPair, K256VerifyingKey as GossipMsgPublicKey, + K256Ecdsa as Curve, K256Signature as InstanceSignedMsgSignature, + K256SigningKey as InstanceMsgKeyPair, K256VerifyingKey as InstanceMsgPublicKey, }; } diff --git a/crates/networking/src/messaging.rs b/crates/networking/src/messaging.rs deleted file mode 100644 index c4a0dd6c1..000000000 --- a/crates/networking/src/messaging.rs +++ /dev/null @@ -1,468 +0,0 @@ -use async_trait::async_trait; -use gadget_std as std; -use gadget_std::boxed::Box; -use gadget_std::collections::HashMap; -use gadget_std::fmt::Display; -use gadget_std::hash::Hash; -use gadget_std::ops::Add; -use gadget_std::sync::atomic::AtomicBool; -use gadget_std::sync::Arc; -use gadget_std::{eprintln, string::String, vec::Vec}; -use itertools::Itertools; -use serde::{Deserialize, Serialize}; -use tokio::sync::RwLock; -use tokio::time::{sleep, Duration}; - -const OUTBOUND_POLL: Duration = Duration::from_millis(100); -const INBOUND_POLL: Duration = Duration::from_millis(100); - -#[async_trait] -pub trait MessageMetadata { - type JobId: Display + Hash + Eq + Copy + Send + Sync + 'static; - type PeerId: Display + Hash + Eq + Copy + Send + Sync + 'static; - type MessageId: Add - + Eq - + PartialEq - + Display - + Hash - + Ord - + PartialOrd - + Copy - + Send - + Sync - + 'static; - - fn job_id(&self) -> Self::JobId; - fn source_id(&self) -> Self::PeerId; - fn destination_id(&self) -> Self::PeerId; - fn message_id(&self) -> Self::MessageId; - fn contents(&self) -> &[u8]; -} - -#[async_trait] -pub trait NetworkMessagingIO { - type Message: MessageMetadata + Send + Sync + 'static; - - async fn next_message(&self) -> Option>; - async fn send_message(&self, message: &Payload) -> Result<(), NetworkError>; -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum Payload { - Ack { - job_id: M::JobId, - from_id: M::PeerId, - message_id: M::MessageId, - }, - Message(M), -} - -#[derive(Debug)] -pub enum NetworkError { - SendFailed(String), - ConnectionError(String), -} - -#[derive(Debug)] -pub enum BackendError { - StorageError(String), - NotFound, - Stopped, -} - -#[derive(Debug, Copy, Clone)] -pub enum DeliveryError { - NoReceiver, - ChannelClosed, -} - -// Modified Backend trait to handle both outbound and inbound messages -#[async_trait] -pub trait Backend { - async fn store_outbound(&self, message: M) -> Result<(), BackendError>; - async fn store_inbound(&self, message: M) -> Result<(), BackendError>; - async fn clear_message( - &self, - peer_id: M::PeerId, - job_id: M::JobId, - message_id: M::MessageId, - ) -> Result<(), BackendError>; - async fn get_pending_outbound(&self) -> Result, BackendError>; - async fn get_pending_inbound(&self) -> Result, BackendError>; -} - -#[async_trait] -pub trait LocalDelivery { - async fn deliver(&self, message: M) -> Result<(), DeliveryError>; -} - -// Add this new struct to track last ACKed message IDs -pub struct MessageTracker { - last_acked: HashMap<(M::JobId, M::PeerId), M::MessageId>, -} - -impl MessageTracker { - fn new() -> Self { - Self { - last_acked: HashMap::new(), - } - } - - fn update_ack(&mut self, job_id: M::JobId, peer_id: M::PeerId, msg_id: M::MessageId) { - let key = (job_id, peer_id); - match self.last_acked.get(&key) { - 
Some(last_id) => { - if msg_id == *last_id + 1usize { - let _ = self.last_acked.insert(key, msg_id); - } - } - None => { - // For the first message in a sequence, only accept if it's the initial message - let _ = self.last_acked.insert(key, msg_id); - } - } - } - - fn can_send(&self, job_id: &M::JobId, peer_id: &M::PeerId, msg_id: &M::MessageId) -> bool { - match self.last_acked.get(&(*job_id, *peer_id)) { - Some(last_id) => *msg_id == *last_id + 1usize, - None => true, // If there is no message for this job/peer_id combo, send it - } - } -} - -pub struct MessageSystem -where - M: MessageMetadata + Clone + Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static, - B: Backend + Send + Sync + 'static, - L: LocalDelivery + Send + Sync + 'static, - N: NetworkMessagingIO + Send + Sync + 'static, -{ - backend: Arc, - local_delivery: Arc, - network: Arc, - is_running: Arc, - tracker: Arc>>, -} - -impl Clone for MessageSystem -where - M: MessageMetadata + Clone + Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static, - B: Backend + Send + Sync + 'static, - L: LocalDelivery + Send + Sync + 'static, - N: NetworkMessagingIO + Send + Sync + 'static, -{ - fn clone(&self) -> Self { - Self { - backend: self.backend.clone(), - local_delivery: self.local_delivery.clone(), - network: self.network.clone(), - is_running: self.is_running.clone(), - tracker: self.tracker.clone(), - } - } -} - -impl MessageSystem -where - M: MessageMetadata + Clone + Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static, - B: Backend + Send + Sync + 'static, - L: LocalDelivery + Send + Sync + 'static, - N: NetworkMessagingIO + Send + Sync + 'static, -{ - pub fn new(backend: B, local_delivery: L, network: N) -> Self { - let this = Self { - backend: Arc::new(backend), - local_delivery: Arc::new(local_delivery), - network: Arc::new(network), - is_running: Arc::new(AtomicBool::new(true)), - tracker: Arc::new(RwLock::new(MessageTracker::new())), - }; - - this.spawn_background_tasks(); - - this - } - - fn spawn_background_tasks(&self) { - // Spawn outbound processing task - let self_clone = self.clone(); - let is_alive = self.is_running.clone(); - - let outbound_handle = tokio::spawn(async move { - loop { - self_clone.process_outbound().await; - sleep(OUTBOUND_POLL).await; - } - }); - - // Spawn inbound processing task - let self_clone = self.clone(); - let inbound_handle = tokio::spawn(async move { - loop { - self_clone.process_inbound().await; - sleep(INBOUND_POLL).await; - } - }); - - // Spawn network listener task - let self_clone = self.clone(); - let network_io_handle = tokio::spawn(async move { - self_clone.process_network_messages().await; - }); - - // Spawn a task that selects all three handles, and on any of them finishing, it will - // set the atomic bool to false - drop(tokio::spawn(async move { - tokio::select! 
{ - _ = outbound_handle => { - gadget_logging::error!("Outbound processing task prematurely ended"); - }, - _ = inbound_handle => { - gadget_logging::error!("Inbound processing task prematurely ended"); - }, - _ = network_io_handle => { - gadget_logging::error!("Network IO task prematurely ended"); - }, - } - - is_alive.store(false, gadget_std::sync::atomic::Ordering::Relaxed); - })); - } - - async fn process_outbound(&self) { - let pending_messages = match self.backend.get_pending_outbound().await { - Ok(messages) => messages, - Err(e) => { - eprintln!("Failed to get pending outbound messages: {:?}", e); - return; - } - }; - - // Group messages by (JobId, PeerId) pair - let mut grouped_messages: HashMap<(M::JobId, M::PeerId), Vec> = HashMap::new(); - for msg in pending_messages { - grouped_messages - .entry((msg.job_id(), msg.destination_id())) - .or_default() - .push(msg); - } - - // Process each group independently - let tracker = self.tracker.read().await; - for ((job_id, peer_id), mut messages) in grouped_messages { - // Sort messages by MessageId - messages.sort_by_key(MessageMetadata::message_id); - - // Find the first message we can send based on ACKs - if let Some(msg) = messages - .into_iter() - .find(|m| tracker.can_send(&job_id, &peer_id, &m.message_id())) - { - if let Err(e) = self.network.send_message(&Payload::Message(msg)).await { - eprintln!("Failed to send message: {:?}", e); - } - } - } - } - - async fn process_inbound(&self) { - let pending_messages = match self.backend.get_pending_inbound().await { - Ok(messages) => messages, - Err(e) => { - eprintln!("Failed to get pending inbound messages: {:?}", e); - return; - } - }; - - // Sort the pending messages in order by MessageID - let pending_messages: Vec = pending_messages - .into_iter() - .sorted_by_key(MessageMetadata::message_id) - .collect(); - - for message in pending_messages { - match self.local_delivery.deliver(message.clone()).await { - Ok(()) => { - // Create and send ACK - if let Err(e) = self - .network - .send_message(&Self::create_ack_message(&message)) - .await - { - gadget_logging::error!("Failed to send ACK: {e:?}"); - continue; - } - - // Clear delivered message from backend - if let Err(e) = self - .backend - .clear_message(message.source_id(), message.job_id(), message.message_id()) - .await - { - gadget_logging::error!("Failed to clear delivered message: {e:?}"); - } - } - Err(e) => { - gadget_logging::error!("Failed to deliver message: {e:?}"); - } - } - } - } - - // Modify process_network_messages to update the tracker - async fn process_network_messages(&self) { - loop { - if let Some(message) = self.network.next_message().await { - match message { - Payload::Ack { - job_id, - from_id, - message_id, - } => { - // Update the tracker with the new ACK - let mut tracker = self.tracker.write().await; - tracker.update_ack(job_id, from_id, message_id); - - if let Err(e) = self - .backend - .clear_message(from_id, job_id, message_id) - .await - { - gadget_logging::error!("Failed to clear ACKed message: {e:?}"); - } - } - Payload::Message(msg) => { - if let Err(e) = self.backend.store_inbound(msg).await { - gadget_logging::error!("Failed to store inbound message: {e:?}"); - } - } - } - } - } - } - - /// Send a message through the message system - /// - /// # Errors - /// - /// Returns `BackendError::Stopped` if the message system is not running. - /// May also return other `BackendError` variants if the backend storage operation fails. 
- pub async fn send_message(&self, message: M) -> Result<(), BackendError> { - if self.is_running.load(std::sync::atomic::Ordering::Relaxed) { - self.backend.store_outbound(message).await - } else { - Err(BackendError::Stopped) - } - } - - fn create_ack_message(original_message: &M) -> Payload { - Payload::Ack { - job_id: original_message.job_id(), - from_id: original_message.source_id(), - message_id: original_message.message_id(), - } - } -} - -// Example InMemoryBackend implementation -pub struct InMemoryBackend { - outbound: Mailbox, - inbound: Mailbox, -} - -type Mailbox = - Arc>>; - -impl InMemoryBackend { - #[must_use] - pub fn new() -> Self { - Self { - outbound: Arc::new(RwLock::new(HashMap::new())), - inbound: Arc::new(RwLock::new(HashMap::new())), - } - } -} - -impl Default for InMemoryBackend { - fn default() -> Self { - Self::new() - } -} - -#[async_trait] -impl Backend for InMemoryBackend { - async fn store_outbound(&self, message: M) -> Result<(), BackendError> { - let mut outbound = self.outbound.write().await; - let (job_id, source_id, message_id) = - (message.job_id(), message.source_id(), message.message_id()); - - if outbound - .insert( - ( - message.job_id(), - message.destination_id(), - message.message_id(), - ), - message, - ) - .is_some() - { - gadget_logging::warn!( - "Overwriting existing message in outbound storage jid={}/dest={}/id={}", - job_id, - source_id, - message_id - ); - } - Ok(()) - } - - async fn store_inbound(&self, message: M) -> Result<(), BackendError> { - let mut inbound = self.inbound.write().await; - let (job_id, source_id, message_id) = - (message.job_id(), message.source_id(), message.message_id()); - - if inbound - .insert( - (message.job_id(), message.source_id(), message.message_id()), - message, - ) - .is_some() - { - gadget_logging::warn!( - "Overwriting existing message in inbound storage jid={}/src={}/id={}", - job_id, - source_id, - message_id - ); - } - Ok(()) - } - - async fn clear_message( - &self, - peer_id: M::PeerId, - job_id: M::JobId, - message_id: M::MessageId, - ) -> Result<(), BackendError> { - // Try to remove from both outbound and inbound - let mut outbound = self.outbound.write().await; - let mut inbound = self.inbound.write().await; - - let _ = outbound.remove(&(job_id, peer_id, message_id)); - let _ = inbound.remove(&(job_id, peer_id, message_id)); - - Ok(()) - } - - async fn get_pending_outbound(&self) -> Result, BackendError> { - let outbound = self.outbound.read().await; - Ok(outbound.values().cloned().collect()) - } - - async fn get_pending_inbound(&self) -> Result, BackendError> { - let inbound = self.inbound.read().await; - Ok(inbound.values().cloned().collect()) - } -} diff --git a/crates/networking/src/networking.rs b/crates/networking/src/networking.rs deleted file mode 100644 index 1ccad1dd9..000000000 --- a/crates/networking/src/networking.rs +++ /dev/null @@ -1,603 +0,0 @@ -#[cfg(test)] -mod tests; - -use crate::key_types::GossipMsgPublicKey; -use crate::Error; -use async_trait::async_trait; -use dashmap::DashMap; -use futures::{Stream, StreamExt}; -use gadget_crypto::hashing::blake3_256; -use gadget_std as std; -use gadget_std::boxed::Box; -use gadget_std::cmp::Reverse; -use gadget_std::collections::{BinaryHeap, HashMap}; -use gadget_std::fmt::Display; -use gadget_std::format; -use gadget_std::ops::{Deref, DerefMut}; -use gadget_std::pin::Pin; -use gadget_std::string::ToString; -use gadget_std::sync::Arc; -use gadget_std::task::{Context, Poll}; -use gadget_std::vec::Vec; -use serde::{Deserialize, 
Serialize}; -use tokio::sync::mpsc::UnboundedSender; -use tokio::sync::Mutex; - -pub type UserID = u16; - -#[derive(Debug, Serialize, Deserialize, Clone, Copy, Default)] -pub struct IdentifierInfo { - pub message_id: u64, - pub round_id: u16, -} - -impl Display for IdentifierInfo { - fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { - let message_id = format!("message_id: {}", self.message_id); - let round_id = format!("round_id: {}", self.round_id); - write!(f, "{} {}", message_id, round_id) - } -} - -#[derive(Debug, Serialize, Deserialize, Clone, Copy)] -pub struct ParticipantInfo { - pub user_id: u16, - pub public_key: Option, -} - -impl Display for ParticipantInfo { - fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { - let public_key = self - .public_key - .map(|key| format!("public_key: {:?}", key)) - .unwrap_or_default(); - write!(f, "user_id: {}, {}", self.user_id, public_key) - } -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ProtocolMessage { - pub identifier_info: IdentifierInfo, - pub sender: ParticipantInfo, - pub recipient: Option, - pub payload: Vec, -} - -impl Display for ProtocolMessage { - fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { - write!( - f, - "identifier_info: {}, sender: {}, recipient: {:?}, payload: {:?}", - self.identifier_info, self.sender, self.recipient, self.payload - ) - } -} - -#[async_trait] -#[auto_impl::auto_impl(&, Box, Arc)] -pub trait Network: Send + Sync + 'static { - async fn next_message(&self) -> Option; - async fn send_message(&self, message: ProtocolMessage) -> Result<(), Error>; - - fn public_id(&self) -> GossipMsgPublicKey; - - fn build_protocol_message( - &self, - identifier_info: IdentifierInfo, - from: UserID, - to: Option, - payload: &Payload, - to_network_id: Option, - ) -> ProtocolMessage { - assert!( - (u8::from(to.is_none()) + u8::from(to_network_id.is_none()) != 1), - "Either `to` must be Some AND `to_network_id` is Some, or, both None" - ); - - let sender_participant_info = ParticipantInfo { - user_id: from, - public_key: Some(self.public_id()), - }; - let receiver_participant_info = to.map(|to| ParticipantInfo { - user_id: to, - public_key: to_network_id, - }); - ProtocolMessage { - identifier_info, - sender: sender_participant_info, - recipient: receiver_participant_info, - payload: bincode::serialize(payload).expect("Failed to serialize message"), - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct SequencedMessage { - sequence_number: u64, - payload: Vec, -} - -#[derive(Debug)] -struct PendingMessage { - sequence_number: u64, - message: ProtocolMessage, -} - -impl PartialEq for PendingMessage { - fn eq(&self, other: &Self) -> bool { - self.sequence_number == other.sequence_number - } -} - -impl Eq for PendingMessage {} - -impl PartialOrd for PendingMessage { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for PendingMessage { - fn cmp(&self, other: &Self) -> gadget_std::cmp::Ordering { - self.sequence_number.cmp(&other.sequence_number) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct MultiplexedMessage { - stream_id: StreamKey, - payload: SequencedMessage, -} - -pub struct NetworkMultiplexer { - to_receiving_streams: ActiveStreams, - unclaimed_receiving_streams: Arc>, - tx_to_networking_layer: MultiplexedSender, - sequence_numbers: Arc>, - pub my_id: GossipMsgPublicKey, -} - -type ActiveStreams = Arc>>; - -#[derive(Copy, Clone, Debug, Eq, 
PartialEq, Hash, Serialize, Deserialize, Default)] -pub struct StreamKey { - pub task_hash: [u8; 32], - pub round_id: i32, -} - -impl From for StreamKey { - fn from(identifier_info: IdentifierInfo) -> Self { - let str_repr = identifier_info.to_string(); - let task_hash = blake3_256(str_repr.as_bytes()); - Self { - task_hash, - round_id: -1, - } - } -} - -pub struct MultiplexedReceiver { - inner: tokio::sync::mpsc::UnboundedReceiver, - stream_id: StreamKey, - // For post-drop removal purposes - active_streams: ActiveStreams, -} - -#[derive(Clone)] -pub struct MultiplexedSender { - inner: tokio::sync::mpsc::UnboundedSender<(StreamKey, ProtocolMessage)>, - pub(crate) stream_id: StreamKey, -} - -impl MultiplexedSender { - /// Sends a protocol message through the multiplexed channel. - /// - /// # Arguments - /// * `message` - The protocol message to send - /// - /// # Returns - /// * `Ok(())` - If the message was successfully sent - /// * `Err(Error)` - If there was an error sending the message - /// - /// # Errors - /// Returns an error if the receiving end of the channel has been closed, - /// indicating that the network connection is no longer available. - pub fn send(&self, message: ProtocolMessage) -> Result<(), Error> { - self.inner - .send((self.stream_id, message)) - .map_err(|err| Error::Other(err.to_string())) - } -} - -impl Stream for MultiplexedReceiver { - type Item = ProtocolMessage; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().inner).poll_recv(cx) - } -} - -impl Deref for MultiplexedReceiver { - type Target = tokio::sync::mpsc::UnboundedReceiver; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl DerefMut for MultiplexedReceiver { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -impl Drop for MultiplexedReceiver { - fn drop(&mut self) { - let _ = self.active_streams.remove(&self.stream_id); - } -} - -// Since a single stream can be used for multiple users, and, multiple users assign seq's independently, -// we need to make a key that is unique for each (send->dest) pair and stream. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -struct CompoundStreamKey { - stream_key: StreamKey, - send_user: UserID, - recv_user: Option, -} - -impl NetworkMultiplexer { - /// Creates a new `NetworkMultiplexer` instance. - /// - /// # Arguments - /// * `network` - The underlying network implementation that implements the Network trait - /// - /// # Type Parameters - /// * `N` - The network type that implements the Network trait - /// - /// # Returns - /// * `Self` - A new `NetworkMultiplexer` instance - /// - /// # Panics - /// This function will panic if the internal receiver has already been taken, which should not happen. 
- #[allow(clippy::too_many_lines)] - pub fn new(network: N) -> Self { - let (tx_to_networking_layer, mut rx_from_substreams) = - tokio::sync::mpsc::unbounded_channel(); - let my_id = network.public_id(); - let this = NetworkMultiplexer { - to_receiving_streams: Arc::new(DashMap::new()), - unclaimed_receiving_streams: Arc::new(DashMap::new()), - tx_to_networking_layer: MultiplexedSender { - inner: tx_to_networking_layer, - stream_id: StreamKey::default(), - }, - sequence_numbers: Arc::new(DashMap::new()), - my_id, - }; - - let active_streams = this.to_receiving_streams.clone(); - let unclaimed_streams = this.unclaimed_receiving_streams.clone(); - let tx_to_networking_layer = this.tx_to_networking_layer.clone(); - let sequence_numbers = this.sequence_numbers.clone(); - - drop(tokio::spawn(async move { - let network_clone = &network; - - let task1 = async move { - while let Some((stream_id, msg)) = rx_from_substreams.recv().await { - let compound_key = CompoundStreamKey { - stream_key: stream_id, - send_user: msg.sender.user_id, - recv_user: msg.recipient.as_ref().map(|p| p.user_id), - }; - - let mut seq = sequence_numbers.entry(compound_key).or_insert(0); - let current_seq = *seq; - *seq += 1; - - gadget_logging::trace!( - "SEND SEQ {current_seq} FROM {} | StreamKey: {:?}", - msg.sender.user_id, - hex::encode(bincode::serialize(&compound_key).unwrap()) - ); - - let multiplexed_message = MultiplexedMessage { - stream_id, - payload: SequencedMessage { - sequence_number: current_seq, - payload: msg.payload, - }, - }; - - let message = ProtocolMessage { - identifier_info: msg.identifier_info, - sender: msg.sender, - recipient: msg.recipient, - payload: bincode::serialize(&multiplexed_message) - .expect("Failed to serialize message"), - }; - - if let Err(err) = network_clone.send_message(message).await { - gadget_logging::error!("Failed to send message to network: {err:?}"); - break; - } - } - }; - - let task2 = async move { - let mut pending_messages: HashMap< - CompoundStreamKey, - BinaryHeap>, - > = HashMap::default(); - let mut expected_seqs: HashMap = HashMap::default(); - - while let Some(mut msg) = network_clone.next_message().await { - if let Some(recv) = msg.recipient.as_ref() { - if let Some(recv_pk) = &recv.public_key { - if recv_pk != &my_id { - gadget_logging::warn!( - "Received a message not intended for the local user" - ); - } - } - } - - let Ok(multiplexed_message) = - bincode::deserialize::(&msg.payload) - else { - gadget_logging::error!("Failed to deserialize message (networking)"); - continue; - }; - - let stream_id = multiplexed_message.stream_id; - let compound_key = CompoundStreamKey { - stream_key: stream_id, - send_user: msg.sender.user_id, - recv_user: msg.recipient.as_ref().map(|p| p.user_id), - }; - let seq = multiplexed_message.payload.sequence_number; - msg.payload = multiplexed_message.payload.payload; - - // Get or create the pending heap for this stream - let pending = pending_messages.entry(compound_key).or_default(); - let expected_seq = expected_seqs.entry(compound_key).or_default(); - - let send_user = msg.sender.user_id; - let recv_user = msg.recipient.as_ref().map(|p| p.user_id); - let compound_key_hex = hex::encode(bincode::serialize(&compound_key).unwrap()); - gadget_logging::trace!( - "RECV SEQ {seq} FROM {} as user {:?} | Expecting: {} | StreamKey: {:?}", - send_user, - recv_user, - *expected_seq, - compound_key_hex, - ); - - // Add the message to pending - pending.push(Reverse(PendingMessage { - sequence_number: seq, - message: msg, - })); - - // Try to 
deliver messages in order - if let Some(active_receiver) = active_streams.get(&stream_id) { - while let Some(Reverse(PendingMessage { - sequence_number, - message: _, - })) = pending.peek() - { - if *sequence_number != *expected_seq { - gadget_logging::error!( - "Sequence number mismatch, expected {} but got {}", - *expected_seq, - sequence_number - ); - break; - } - - gadget_logging::trace!("DELIVERING SEQ {seq} FROM {} as user {:?} | Expecting: {} | StreamKey: {:?}", send_user, recv_user, *expected_seq, compound_key_hex); - - *expected_seq += 1; - - let message = pending.pop().unwrap().0.message; - - if let Err(err) = active_receiver.send(message) { - gadget_logging::error!(%err, "Failed to send message to receiver"); - let _ = active_streams.remove(&stream_id); - break; - } - } - } else { - let (tx, rx) = Self::create_multiplexed_stream_inner( - tx_to_networking_layer.clone(), - &active_streams, - stream_id, - ); - - // Deliver any pending messages in order - while let Some(Reverse(PendingMessage { - sequence_number, - message: _, - })) = pending.peek() - { - if *sequence_number != *expected_seq { - gadget_logging::error!( - "Sequence number mismatch, expected {} but got {}", - *expected_seq, - sequence_number - ); - break; - } - - gadget_logging::warn!("EARLY DELIVERY SEQ {seq} FROM {} as user {:?} | Expecting: {} | StreamKey: {:?}", send_user, recv_user, *expected_seq, compound_key_hex); - - *expected_seq += 1; - - let message = pending.pop().unwrap().0.message; - - if let Err(err) = tx.send(message) { - gadget_logging::error!(%err, "Failed to send message to receiver"); - break; - } - } - - let _ = unclaimed_streams.insert(stream_id, rx); - } - } - }; - - tokio::select! { - () = task1 => { - gadget_logging::error!("Task 1 exited"); - }, - () = task2 => { - gadget_logging::error!("Task 2 exited"); - } - } - })); - - this - } - - /// Creates a new multiplexed stream. - /// - /// # Arguments - /// * `id` - The ID of the stream to create - /// - /// # Returns - /// * `Self` - A new multiplexed stream - pub fn multiplex(&self, id: impl Into) -> SubNetwork { - let id = id.into(); - let my_id = self.my_id; - let mut tx_to_networking_layer = self.tx_to_networking_layer.clone(); - if let Some(unclaimed) = self.unclaimed_receiving_streams.remove(&id) { - tx_to_networking_layer.stream_id = id; - return SubNetwork { - tx: tx_to_networking_layer, - rx: Some(unclaimed.1.into()), - my_id, - }; - } - - let (tx, rx) = Self::create_multiplexed_stream_inner( - tx_to_networking_layer, - &self.to_receiving_streams, - id, - ); - - SubNetwork { - tx, - rx: Some(rx.into()), - my_id, - } - } - - /// Creates a subnetwork, and also forwards all messages to the given channel. The network cannot be used to - /// receive messages since the messages will be forwarded to the provided channel. - /// - /// # Panics - /// - /// This function will panic if the internal receiver has already been taken, which should not happen - /// under normal circumstances. 
- pub fn multiplex_with_forwarding( - &self, - id: impl Into, - forward_tx: tokio::sync::mpsc::UnboundedSender, - ) -> SubNetwork { - let mut network = self.multiplex(id); - let rx = network.rx.take().expect("Rx from network should be Some"); - let forwarding_task = async move { - let mut rx = rx.into_inner(); - while let Some(msg) = rx.recv().await { - gadget_logging::info!( - "Round {}: Received message from {} to {:?} (id: {})", - msg.identifier_info.round_id, - msg.sender.user_id, - msg.recipient.as_ref().map(|p| p.user_id), - msg.identifier_info.message_id, - ); - if let Err(err) = forward_tx.send(msg) { - gadget_logging::error!(%err, "Failed to forward message to network"); - // TODO: Add AtomicBool to make sending stop - break; - } - } - }; - - drop(tokio::spawn(forwarding_task)); - - network - } - - fn create_multiplexed_stream_inner( - mut tx_to_networking_layer: MultiplexedSender, - active_streams: &ActiveStreams, - stream_id: StreamKey, - ) -> (MultiplexedSender, MultiplexedReceiver) { - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - if active_streams.insert(stream_id, tx).is_some() { - gadget_logging::warn!( - "Stream ID {stream_id:?} already exists! Existing stream will be replaced" - ); - } - tx_to_networking_layer.stream_id = stream_id; - - ( - tx_to_networking_layer, - MultiplexedReceiver { - inner: rx, - stream_id, - active_streams: active_streams.clone(), - }, - ) - } -} - -impl From for NetworkMultiplexer { - fn from(network: N) -> Self { - Self::new(network) - } -} - -pub struct SubNetwork { - tx: MultiplexedSender, - rx: Option>, - my_id: GossipMsgPublicKey, -} - -impl SubNetwork { - /// Sends a protocol message through the subnetwork. - /// - /// # Arguments - /// * `message` - The protocol message to send - /// - /// # Returns - /// * `Ok(())` - If the message was successfully sent - /// * `Err(Error)` - If there was an error sending the message - /// - /// # Errors - /// * Returns an error if the underlying network connection is closed or unavailable - pub fn send(&self, message: ProtocolMessage) -> Result<(), Error> { - self.tx.send(message) - } - - pub async fn recv(&self) -> Option { - self.rx.as_ref()?.lock().await.next().await - } -} - -#[async_trait] -impl Network for SubNetwork { - async fn next_message(&self) -> Option { - self.recv().await - } - - async fn send_message(&self, message: ProtocolMessage) -> Result<(), Error> { - self.send(message) - } - - fn public_id(&self) -> GossipMsgPublicKey { - self.my_id - } -} diff --git a/crates/networking/src/networking/tests.rs b/crates/networking/src/networking/tests.rs deleted file mode 100644 index 6f1068636..000000000 --- a/crates/networking/src/networking/tests.rs +++ /dev/null @@ -1,769 +0,0 @@ -use self::std::time::Duration; -use super::*; -use crate::gossip::GossipHandle; -use futures::{stream, StreamExt}; -use gadget_crypto::hashing::blake3_256; -use gadget_crypto::KeyType; -use gadget_logging::setup_log; -use gadget_std::collections::BTreeMap; -use gadget_std::sync::LazyLock; -use serde::{Deserialize, Serialize}; -use tokio::time::sleep; - -const TOPIC: &str = "/gadget/test/1.0.0"; - -fn deserialize<'a, T>(data: &'a [u8]) -> Result -where - T: Deserialize<'a>, -{ - bincode::deserialize(data).map_err(|err| Error::Other(err.to_string())) -} - -#[derive(Serialize, Deserialize, Debug)] -struct StressTestPayload { - value: u64, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -enum Msg { - Round1(Round1Msg), - Round2(Round2Msg), - Round3(Round3Msg), -} - -#[derive(Debug, Serialize, Deserialize, 
Clone)] -struct Round1Msg { - pub power: u16, - pub hitpoints: u16, - pub armor: u16, - pub name: String, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -struct Round2Msg { - pub x: u16, - pub y: u16, - pub z: u16, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -struct Round3Msg { - rotation: u16, - velocity: (u16, u16, u16), -} - -async fn wait_for_nodes_connected(nodes: &[GossipHandle]) { - let node_count = nodes.len(); - - // wait for the nodes to connect to each other - let max_retries = 10 * node_count; - let mut retry = 0; - loop { - gadget_logging::debug!(%node_count, %max_retries, %retry, "Checking if all nodes are connected to each other"); - let connected = nodes - .iter() - .map(super::super::gossip::GossipHandle::connected_peers) - .collect::>(); - - let all_connected = connected - .iter() - .enumerate() - .inspect(|(node, peers)| { - gadget_logging::debug!("Node {node} has {peers} connected peers"); - }) - .all(|(_, &peers)| peers >= node_count - 1); - if all_connected { - gadget_logging::debug!("All nodes are connected to each other"); - return; - } - sleep(Duration::from_millis(300)).await; - retry += 1; - assert!( - retry <= max_retries, - "Failed to connect all nodes to each other" - ); - } -} - -#[tokio::test(flavor = "multi_thread")] -#[allow(clippy::cast_possible_truncation)] -async fn test_p2p() { - setup_log(); - let nodes = stream::iter(0..*NODE_COUNT) - .map(|_| node()) - .collect::>() - .await; - - wait_for_nodes_connected(&nodes).await; - - let mut mapping = BTreeMap::new(); - for (i, node) in nodes.iter().enumerate() { - mapping.insert(i as u16, node.my_id); - } - - let mut tasks = Vec::new(); - for (i, node) in nodes.into_iter().enumerate() { - let task = tokio::spawn(run_protocol(node, i as u16, mapping.clone())); - tasks.push(task); - } - // Wait for all tasks to finish - let results = futures::future::try_join_all(tasks) - .await - .expect("Failed to run protocol"); - // Assert that all are okay. - assert!( - results.iter().all(std::result::Result::is_ok), - "Some nodes failed to run protocol" - ); -} - -#[allow(clippy::too_many_lines, clippy::cast_possible_truncation)] -async fn run_protocol( - node: N, - i: u16, - mapping: BTreeMap, -) -> Result<(), crate::Error> { - let task_hash = [0u8; 32]; - // Safety note: We should be passed a NetworkMultiplexer, and all uses of the N: Network - // used throughout the program must also use the multiplexer to prevent mixed messages. - let multiplexer = NetworkMultiplexer::new(node); - - let round1_network = multiplexer.multiplex(StreamKey { - task_hash, // To differentiate between different instances of a running program (i.e., a task) - round_id: 0, // To differentiate between different subsets of a running task - }); - - let round2_network = multiplexer.multiplex(StreamKey { - task_hash, // To differentiate between different instances of a running program (i.e., a task) - round_id: 1, // To differentiate between different subsets of a running task - }); - - let round3_network = multiplexer.multiplex(StreamKey { - task_hash, // To differentiate between different instances of a running program (i.e., a task) - round_id: 2, // To differentiate between different subsets of a running task - }); - - //let (round1_tx, round1_rx) = node. 
- // Round 1 (broadcast) - let msg = { - let round = Round1Msg { - power: i * 100, - hitpoints: (i + 1) * 50, - armor: i + 2, - name: format!("Player {}", i), - }; - round1_network.build_protocol_message( - IdentifierInfo { - message_id: 0, - round_id: 0, - }, - i, - None, - &Msg::Round1(round), - None, - ) - }; - - gadget_logging::debug!("Broadcast Message"); - round1_network - .send(msg) - .map_err(|_| crate::Error::Other("Failed to send message".into()))?; - - // Wait for all other nodes to send their messages - let mut msgs = BTreeMap::new(); - while let Some(msg) = round1_network.recv().await { - let m = deserialize::(&msg.payload).unwrap(); - gadget_logging::debug!(from = %msg.sender.user_id, ?m, "Received message"); - // Expecting Round1 message - assert!( - matches!(m, Msg::Round1(_)), - "Expected Round1 message but got {:?} from node {}", - m, - msg.sender.user_id, - ); - let old = msgs.insert(msg.sender.user_id, m); - assert!( - old.is_none(), - "Duplicate message from node {}", - msg.sender.user_id, - ); - // Break if all messages are received - if msgs.len() == *NODE_COUNT - 1 { - break; - } - } - gadget_logging::debug!("Done r1 w/ {i}"); - - // Round 2 (P2P) - let msgs = (0..*NODE_COUNT) - .map(|r| r as u16) - .filter(|&j| j != i) - .map(|j| { - let peer_pk = mapping.get(&j).copied().unwrap(); - round2_network.build_protocol_message( - IdentifierInfo { - message_id: 0, - round_id: 0, - }, - i, - Some(j), - &Msg::Round2(Round2Msg { - x: i * 10, - y: (i + 1) * 20, - z: i + 2, - }), - Some(peer_pk), - ) - }) - .collect::>(); - for msg in msgs { - let to = msg - .recipient - .map(|r| r.user_id) - .expect("Recipient should be present for P2P message. This is a bug in the test code"); - gadget_logging::debug!(%to, "Send P2P Message"); - round2_network.send(msg)?; - } - - // Wait for all other nodes to send their messages - let mut msgs = BTreeMap::new(); - while let Some(msg) = round2_network.recv().await { - let m = deserialize::(&msg.payload).unwrap(); - gadget_logging::info!( - "[Node {}] Received message from {} | Intended Recipient: {}", - i, - msg.sender.user_id, - msg.recipient - .as_ref() - .map_or_else(|| "Broadcast".into(), |r| r.user_id.to_string()) - ); - // Expecting Round2 message - assert!( - matches!(m, Msg::Round2(_)), - "Expected Round2 message but got {:?} from node {}", - m, - msg.sender.user_id, - ); - let old = msgs.insert(msg.sender.user_id, m); - assert!( - old.is_none(), - "Duplicate message from node {}", - msg.sender.user_id, - ); - // Break if all messages are received - if msgs.len() == *NODE_COUNT - 1 { - break; - } - } - gadget_logging::debug!("Done r2 w/ {i}"); - - // Round 3 (broadcast) - - let msg = { - let round = Round3Msg { - rotation: i * 30, - velocity: (i + 1, i + 2, i + 3), - }; - round3_network.build_protocol_message( - IdentifierInfo { - message_id: 0, - round_id: 0, - }, - i, - None, - &Msg::Round3(round), - None, - ) - }; - - gadget_logging::debug!("Broadcast Message"); - round3_network.send(msg)?; - - // Wait for all other nodes to send their messages - let mut msgs = BTreeMap::new(); - while let Some(msg) = round3_network.recv().await { - let m = deserialize::(&msg.payload).unwrap(); - gadget_logging::debug!(from = %msg.sender.user_id, ?m, "Received message"); - // Expecting Round3 message - assert!( - matches!(m, Msg::Round3(_)), - "Expected Round3 message but got {:?} from node {}", - m, - msg.sender.user_id, - ); - let old = msgs.insert(msg.sender.user_id, m); - assert!( - old.is_none(), - "Duplicate message from node {}", - 
msg.sender.user_id, - ); - // Break if all messages are received - if msgs.len() == *NODE_COUNT - 1 { - break; - } - } - gadget_logging::debug!("Done r3 w/ {i}"); - - gadget_logging::info!(node = i, "Protocol completed"); - - Ok(()) -} - -fn node_with_id() -> (GossipHandle, crate::key_types::GossipMsgKeyPair) { - let identity = libp2p::identity::Keypair::generate_ed25519(); - let crypto_key = crate::key_types::Curve::generate_with_seed(None).unwrap(); - let bind_port = 0; - let handle = crate::setup::start_p2p_network(crate::setup::NetworkConfig::new_service_network( - identity, - crypto_key.clone(), - Vec::default(), - bind_port, - TOPIC, - )) - .unwrap(); - - (handle, crypto_key) -} - -fn node() -> GossipHandle { - node_with_id().0 -} - -static NODE_COUNT: LazyLock = - LazyLock::new(|| std::env::var("IN_CI").map_or_else(|_| 10, |_| 2)); -#[allow(dead_code)] -static MESSAGE_COUNT: LazyLock = - LazyLock::new(|| std::env::var("IN_CI").map_or_else(|_| 10, |_| 100)); - -#[tokio::test(flavor = "multi_thread")] -#[allow(clippy::cast_possible_truncation)] -async fn test_stress_test_multiplexer() { - setup_log(); - gadget_logging::info!("Starting test_stress_test_multiplexer"); - - let (network0, network1) = get_networks().await; - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let stream_key = StreamKey { - task_hash: blake3_256(&[1]), - round_id: 0, - }; - - let _subnetwork0 = multiplexer0.multiplex(stream_key); - let _subnetwork1 = multiplexer1.multiplex(stream_key); - - // Create a channel for forwarding - let (forward_tx, mut forward_rx) = tokio::sync::mpsc::unbounded_channel(); - - // Create a subnetwork with forwarding - let subnetwork0 = multiplexer0.multiplex(stream_key); - let subnetwork1 = multiplexer1.multiplex_with_forwarding(stream_key, forward_tx); - - let payload = StressTestPayload { value: 42 }; - let msg = subnetwork0.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), - &payload, - Some(subnetwork1.public_id()), - ); - - gadget_logging::info!("Sending message from subnetwork0"); - subnetwork0.send(msg.clone()).unwrap(); - - // Message should be forwarded to the forward_rx channel - let forwarded_msg = forward_rx.recv().await.unwrap(); - let received: StressTestPayload = deserialize(&forwarded_msg.payload).unwrap(); - assert_eq!(received.value, payload.value); -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_nested_multiplexer() { - setup_log(); - gadget_logging::info!("Starting test_nested_multiplexer"); - let (network0, network1) = get_networks().await; - - nested_multiplex(0, 10, network0, network1).await; -} - -async fn get_networks() -> (GossipHandle, GossipHandle) { - let network0 = node(); - let network1 = node(); - - let mut gossip_networks = vec![network0, network1]; - - wait_for_nodes_connected(&gossip_networks).await; - - (gossip_networks.remove(0), gossip_networks.remove(0)) -} - -async fn nested_multiplex( - cur_depth: usize, - max_depth: usize, - network0: N, - network1: N, -) { - gadget_logging::info!("At nested depth = {cur_depth}/{max_depth}"); - - if cur_depth == max_depth { - return; - } - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let stream_key = StreamKey { - #[allow(clippy::cast_possible_truncation)] - task_hash: blake3_256(&[(cur_depth % 255) as u8]), - round_id: 0, - }; - - let subnetwork0 = multiplexer0.multiplex(stream_key); - let subnetwork1 = multiplexer1.multiplex(stream_key); - 
let subnetwork1_id = subnetwork1.public_id(); - - // Send a message in the subnetwork0 to subnetwork1 and vice versa, assert values of message - let payload = StressTestPayload { value: 42 }; - let msg = subnetwork0.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), - &payload, - Some(subnetwork1_id), - ); - - gadget_logging::info!("Sending message from subnetwork0"); - subnetwork0.send(msg.clone()).unwrap(); - - // Receive message - let received_msg = subnetwork1.recv().await.unwrap(); - let received: StressTestPayload = deserialize(&received_msg.payload).unwrap(); - assert_eq!(received.value, payload.value); - - let msg = subnetwork1.build_protocol_message( - IdentifierInfo::default(), - 1, - Some(0), - &payload, - Some(subnetwork0.public_id()), - ); - - gadget_logging::info!("Sending message from subnetwork1"); - subnetwork1.send(msg.clone()).unwrap(); - - // Receive message - let received_msg = subnetwork0.recv().await.unwrap(); - let received: StressTestPayload = deserialize(&received_msg.payload).unwrap(); - assert_eq!(received.value, payload.value); - tracing::info!("Done nested depth = {cur_depth}/{max_depth}"); - - Box::pin(nested_multiplex( - cur_depth + 1, - max_depth, - subnetwork0, - subnetwork1, - )) - .await; -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_closed_channel_handling() { - setup_log(); - let (network0, network1) = get_networks().await; - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let stream_key = StreamKey { - task_hash: blake3_256(&[1]), - round_id: 0, - }; - - let subnetwork0 = multiplexer0.multiplex(stream_key); - // Drop subnetwork1's receiver to simulate closed channel - let subnetwork1 = multiplexer1.multiplex(stream_key); - drop(subnetwork1); - - let payload = StressTestPayload { value: 42 }; - let msg = - subnetwork0.build_protocol_message(IdentifierInfo::default(), 0, None, &payload, None); - - // Sending to a closed channel should return an error - assert!(subnetwork0.send(msg).is_ok()); // Changed to ok() since the message will be sent but not received -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_empty_payload() { - setup_log(); - let (network0, network1) = get_networks().await; - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let stream_key = StreamKey { - task_hash: blake3_256(&[1]), - round_id: 0, - }; - - let subnetwork0 = multiplexer0.multiplex(stream_key); - let subnetwork1 = multiplexer1.multiplex(stream_key); - - // Test empty payload - let empty_payload = StressTestPayload { value: 0 }; - let msg = subnetwork0.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), - &empty_payload, - Some(subnetwork1.public_id()), - ); - - gadget_logging::info!("Sending message from subnetwork0"); - subnetwork0.send(msg).unwrap(); - - // Receive message - let received_msg = subnetwork1.recv().await.unwrap(); - let received: StressTestPayload = deserialize(&received_msg.payload).unwrap(); - assert_eq!(received.value, empty_payload.value); -} - -#[tokio::test(flavor = "multi_thread")] -#[allow(clippy::cast_possible_truncation)] -async fn test_concurrent_messaging() { - setup_log(); - let (network0, network1) = get_networks().await; - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let mut send_handles = Vec::new(); - let mut receive_handles = Vec::new(); - - // Create multiple messages to send 
concurrently - let message_count = 10; - - // Spawn tasks to send messages - for i in 0..message_count { - let stream_key = StreamKey { - task_hash: blake3_256(&[i]), - round_id: 0, - }; - - let subnetwork0 = multiplexer0.multiplex(stream_key); - let subnetwork1 = multiplexer1.multiplex(stream_key); - let subnetwork1_id = subnetwork1.public_id(); - - let i_u64: u64 = i.into(); - let payload = StressTestPayload { value: i_u64 }; - let send_subnetwork0 = subnetwork0; - let handle = tokio::spawn(async move { - let msg = send_subnetwork0.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), - &payload, - Some(subnetwork1_id), - ); - send_subnetwork0.send(msg).unwrap(); - }); - - send_handles.push(handle); - - // Spawn tasks to receive messages - let handle = tokio::spawn(async move { - let msg = subnetwork1.recv().await.unwrap(); - let received: StressTestPayload = deserialize(&msg.payload).unwrap(); - received.value as u8 // Return the payload value for verification - }); - - receive_handles.push(handle); - } - - // Wait for all sends to complete - for handle in send_handles { - handle.await.unwrap(); - } - - // Wait for all receives and verify we got all messages - let mut received_values = Vec::new(); - for handle in receive_handles { - received_values.push(handle.await.unwrap()); - } - - received_values.sort_unstable(); - assert_eq!(received_values.len(), message_count as usize); - for i in 0..message_count { - assert_eq!(received_values[i as usize], i); - } -} - -#[tokio::test(flavor = "multi_thread")] -#[allow(clippy::cast_possible_truncation)] -async fn test_message_ordering() { - setup_log(); - let (network0, network1) = get_networks().await; - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let stream_key = StreamKey { - task_hash: blake3_256(&[1]), - round_id: 0, - }; - - let subnetwork0 = multiplexer0.multiplex(stream_key); - let subnetwork1 = multiplexer1.multiplex(stream_key); - - // Send messages with sequential sequence numbers - let message_count = 10; - for i in 0..message_count { - let payload = StressTestPayload { value: i }; - let msg = subnetwork0.build_protocol_message( - IdentifierInfo { - message_id: i, - ..Default::default() - }, - 0, - Some(1), - &payload, - Some(subnetwork1.public_id()), - ); - subnetwork0.send(msg).unwrap(); - } - - // Verify messages are received in order - let mut last_seq = 0; - for _ in 0..message_count { - let msg = subnetwork1.recv().await.unwrap(); - assert!( - msg.identifier_info.message_id >= last_seq, - "Messages should be received in order or equal to last sequence number" - ); - last_seq = msg.identifier_info.message_id; - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_network_id_handling() { - setup_log(); - let (network0, network1) = get_networks().await; - let _network0_id = network0.public_id(); - let network1_id = network1.public_id(); - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - let stream_key = StreamKey { - task_hash: blake3_256(&[1]), - round_id: 0, - }; - - let subnetwork0 = multiplexer0.multiplex(stream_key); - let subnetwork1 = multiplexer1.multiplex(stream_key); - - // Test sending with correct network ID - let payload = StressTestPayload { value: 42 }; - let msg = subnetwork0.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), - &payload, - Some(network1_id), - ); - gadget_logging::info!("Sending message from subnetwork0"); - 
subnetwork0.send(msg.clone()).unwrap(); - - // Receive message - let received_msg = subnetwork1.recv().await.unwrap(); - let received: StressTestPayload = deserialize(&received_msg.payload).unwrap(); - assert_eq!(received.value, payload.value); - - // Test sending with wrong network ID - let wrong_key = crate::key_types::Curve::generate_with_seed(None).unwrap(); - let msg = subnetwork0.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), - &payload, - Some(wrong_key.public()), - ); - gadget_logging::info!("Sending message from subnetwork0"); - subnetwork0.send(msg).unwrap(); - - // Message with wrong network ID should not be received - let timeout = tokio::time::sleep(tokio::time::Duration::from_millis(100)); - tokio::select! { - () = timeout => (), - _ = subnetwork1.recv() => panic!("Should not receive message with wrong network ID"), - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_stream_isolation() { - setup_log(); - let (network0, network1) = get_networks().await; - - let multiplexer0 = NetworkMultiplexer::new(network0); - let multiplexer1 = NetworkMultiplexer::new(network1); - - // Create two different stream keys - let stream_key1 = StreamKey { - task_hash: blake3_256(&[1]), - round_id: 0, - }; - let stream_key2 = StreamKey { - task_hash: blake3_256(&[2]), - round_id: 0, - }; - - let subnetwork0_stream1 = multiplexer0.multiplex(stream_key1); - let subnetwork0_stream2 = multiplexer0.multiplex(stream_key2); - let subnetwork1_stream1 = multiplexer1.multiplex(stream_key1); - let subnetwork1_stream2 = multiplexer1.multiplex(stream_key2); - - // Send messages on both streams - let payload1 = StressTestPayload { value: 1 }; - let payload2 = StressTestPayload { value: 2 }; - - let msg1 = subnetwork0_stream1.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), // Send to node 1 - &payload1, - Some(subnetwork1_stream1.public_id()), - ); - let msg2 = subnetwork0_stream2.build_protocol_message( - IdentifierInfo::default(), - 0, - Some(1), // Send to node 1 - &payload2, - Some(subnetwork1_stream2.public_id()), - ); - - gadget_logging::info!("Sending message from subnetwork0_stream1"); - subnetwork0_stream1.send(msg1.clone()).unwrap(); - gadget_logging::info!("Sending message from subnetwork0_stream2"); - subnetwork0_stream2.send(msg2.clone()).unwrap(); - - // Verify messages are received on correct streams - gadget_logging::info!("Waiting for message on subnetwork1_stream1"); - let received_msg1 = subnetwork1_stream1.recv().await.unwrap(); - gadget_logging::info!("Waiting for message on subnetwork1_stream2"); - let received_msg2 = subnetwork1_stream2.recv().await.unwrap(); - - let received1: StressTestPayload = deserialize(&received_msg1.payload).unwrap(); - let received2: StressTestPayload = deserialize(&received_msg2.payload).unwrap(); - - assert_eq!(received1.value, payload1.value); - assert_eq!(received2.value, payload2.value); - - // Verify no cross-stream message leakage - let timeout = tokio::time::sleep(tokio::time::Duration::from_millis(100)); - tokio::select! 
{ - () = timeout => (), - _ = subnetwork1_stream1.recv() => panic!("Should not receive more messages on stream 1"), - _ = subnetwork1_stream2.recv() => panic!("Should not receive more messages on stream 2"), - } -} diff --git a/crates/networking/src/round_based_compat.rs b/crates/networking/src/round_based_compat.rs deleted file mode 100644 index d89d8f077..000000000 --- a/crates/networking/src/round_based_compat.rs +++ /dev/null @@ -1,229 +0,0 @@ -use crate::key_types::GossipMsgPublicKey; -use crate::networking::{ - IdentifierInfo, NetworkMultiplexer, ProtocolMessage, StreamKey, SubNetwork, -}; -use core::pin::Pin; -use core::sync::atomic::AtomicU64; -use core::task::{ready, Context, Poll}; -use futures::prelude::*; -use gadget_std::collections::{BTreeMap, HashMap, VecDeque}; -use gadget_std::string::ToString; -use gadget_std::sync::Arc; -use round_based::{Delivery, Incoming, MessageType, Outgoing}; -use round_based::{MessageDestination, MsgId, PartyIndex}; -use stream::{SplitSink, SplitStream}; - -use crate::networking::ParticipantInfo; - -pub struct NetworkDeliveryWrapper { - /// The wrapped network implementation. - network: NetworkWrapper, -} - -impl NetworkDeliveryWrapper -where - M: Clone + Send + Unpin + 'static, - M: serde::Serialize + serde::de::DeserializeOwned, -{ - /// Create a new `NetworkDeliveryWrapper` over a network implementation with the given party index. - #[must_use] - pub fn new( - mux: Arc, - i: PartyIndex, - task_hash: [u8; 32], - parties: BTreeMap, - ) -> Self { - let (tx_forward, rx) = tokio::sync::mpsc::unbounded_channel(); - // By default, we create 10 substreams for each party. - let mut sub_streams = HashMap::new(); - for x in 0..10 { - let key = StreamKey { - task_hash, - round_id: x, - }; - // Creates a multiplexed subnetwork, and also forwards all messages to the given channel - let _ = sub_streams.insert(key, mux.multiplex_with_forwarding(key, tx_forward.clone())); - } - - let network = NetworkWrapper { - me: i, - mux, - incoming_queue: VecDeque::new(), - sub_streams, - participants: parties, - task_hash, - tx_forward, - rx, - next_msg_id: Arc::new(NextMessageId::default()), - }; - - NetworkDeliveryWrapper { network } - } -} - -/// A `NetworkWrapper` wraps a network implementation -/// and implements [`Stream`] and [`Sink`] for it. -pub struct NetworkWrapper { - /// The current party index. - me: PartyIndex, - /// Our network Multiplexer. - mux: Arc, - /// A Map of substreams for each round. - sub_streams: HashMap, - /// A queue of incoming messages. - #[allow(dead_code)] - incoming_queue: VecDeque>, - /// Participants in the network with their corresponding public keys. - /// Note: This is a `BTreeMap` to ensure that the participants are sorted by their party index. 
- participants: BTreeMap, - next_msg_id: Arc, - tx_forward: tokio::sync::mpsc::UnboundedSender, - rx: tokio::sync::mpsc::UnboundedReceiver, - task_hash: [u8; 32], -} - -impl Delivery for NetworkDeliveryWrapper -where - M: Clone + Send + Unpin + 'static, - M: serde::Serialize + serde::de::DeserializeOwned, - M: round_based::ProtocolMessage, -{ - type Send = SplitSink, Outgoing>; - type Receive = SplitStream>; - type SendError = crate::error::Error; - type ReceiveError = crate::error::Error; - - fn split(self) -> (Self::Receive, Self::Send) { - let (sink, stream) = self.network.split(); - (stream, sink) - } -} - -impl Stream for NetworkWrapper -where - M: serde::de::DeserializeOwned + Unpin, - M: round_based::ProtocolMessage, -{ - type Item = Result, crate::error::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let res = ready!(self.get_mut().rx.poll_recv(cx)); - if let Some(res) = res { - let msg_type = if res.recipient.is_some() { - MessageType::P2P - } else { - MessageType::Broadcast - }; - - let id = res.identifier_info.message_id; - - let msg = match serde_json::from_slice(&res.payload) { - Ok(msg) => msg, - Err(err) => { - gadget_logging::error!(%err, "Failed to deserialize message (round_based_compat)"); - return Poll::Ready(Some(Err(crate::error::Error::Other(err.to_string())))); - } - }; - - Poll::Ready(Some(Ok(Incoming { - msg, - sender: res.sender.user_id, - id, - msg_type, - }))) - } else { - Poll::Ready(None) - } - } -} - -impl Sink> for NetworkWrapper -where - M: Unpin + serde::Serialize, - M: round_based::ProtocolMessage, -{ - type Error = crate::error::Error; - - fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, out: Outgoing) -> Result<(), Self::Error> { - let this = self.get_mut(); - let id = this.next_msg_id.next(); - - let round_id = out.msg.round(); - - gadget_logging::info!( - "Round {}: Sending message from {} to {:?} (id: {})", - round_id, - this.me, - out.recipient, - id, - ); - - // Get the substream to send the message to. 
- let key = StreamKey { - task_hash: this.task_hash, - round_id: i32::from(round_id), - }; - let substream = this.sub_streams.entry(key).or_insert_with(|| { - this.mux - .multiplex_with_forwarding(key, this.tx_forward.clone()) - }); - - let identifier_info = IdentifierInfo { - message_id: id, - round_id, - }; - let (to, to_network_id) = match out.recipient { - MessageDestination::AllParties => (None, None), - MessageDestination::OneParty(p) => (Some(p), this.participants.get(&p).copied()), - }; - - if matches!(out.recipient, MessageDestination::OneParty(_)) && to_network_id.is_none() { - gadget_logging::warn!("Recipient not found when required for {:?}", out.recipient); - return Err(crate::error::Error::Other( - "Recipient not found".to_string(), - )); - } - - // Manually construct a `ProtocolMessage` since rounds-based - // does not work well with bincode - let protocol_message = ProtocolMessage { - identifier_info, - sender: ParticipantInfo { - user_id: this.me, - public_key: this.participants.get(&this.me).copied(), - }, - recipient: to.map(|user_id| ParticipantInfo { - user_id, - public_key: to_network_id, - }), - payload: serde_json::to_vec(&out.msg).expect("Should be able to serialize message"), - }; - - match substream.send(protocol_message) { - Ok(()) => Ok(()), - Err(e) => Err(e), - } - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -#[derive(Default)] -struct NextMessageId(AtomicU64); - -impl NextMessageId { - fn next(&self) -> MsgId { - self.0 - .fetch_add(1, gadget_std::sync::atomic::Ordering::Relaxed) - } -} diff --git a/crates/networking/src/service.rs b/crates/networking/src/service.rs new file mode 100644 index 000000000..1b2455713 --- /dev/null +++ b/crates/networking/src/service.rs @@ -0,0 +1,560 @@ +use std::{collections::HashSet, sync::Arc, time::Duration}; + +use crate::{ + behaviours::{GadgetBehaviour, GadgetBehaviourEvent}, + blueprint_protocol::{BlueprintProtocolEvent, InstanceMessageRequest, InstanceMessageResponse}, + discovery::{ + behaviour::{DerivedDiscoveryBehaviourEvent, DiscoveryEvent}, + PeerInfo, PeerManager, + }, + error::Error, + key_types::{InstanceMsgKeyPair, InstanceMsgPublicKey}, + service_handle::NetworkServiceHandle, + types::ProtocolMessage, +}; +use crossbeam_channel::{self, Receiver, Sender}; +use futures::StreamExt; +use libp2p::{ + identify, + identity::Keypair, + kad, mdns, ping, + swarm::{dial_opts::DialOpts, SwarmEvent}, + Multiaddr, PeerId, Swarm, SwarmBuilder, +}; +use tracing::trace; +use tracing::{debug, info, warn}; + +/// Events emitted by the network service +#[derive(Debug)] +pub enum NetworkEvent { + /// New request received from a peer + InstanceRequestInbound { + peer: PeerId, + request: InstanceMessageRequest, + }, + /// New response received from a peer + InstanceResponseInbound { + peer: PeerId, + response: InstanceMessageResponse, + }, + /// New request sent to a peer + InstanceRequestOutbound { + peer: PeerId, + request: InstanceMessageRequest, + }, + /// Response sent to a peer + InstanceResponseOutbound { + peer: PeerId, + response: InstanceMessageResponse, + }, + /// New gossip message received + GossipReceived { + source: PeerId, + topic: String, + message: Vec, + }, + /// New gossip message sent + GossipSent { topic: String, message: Vec }, + /// Peer connected + PeerConnected(PeerId), + /// Peer disconnected + PeerDisconnected(PeerId), + /// Handshake completed 
successfully + HandshakeCompleted { peer: PeerId }, + /// Handshake failed + HandshakeFailed { peer: PeerId, reason: String }, +} + +/// Network message types +#[derive(Debug)] +pub enum NetworkMessage { + InstanceRequest { + peer: PeerId, + request: InstanceMessageRequest, + }, + GossipMessage { + source: PeerId, + topic: String, + message: Vec, + }, +} + +/// Configuration for the network service +#[derive(Debug, Clone)] +pub struct NetworkConfig { + /// Network name/namespace + pub network_name: String, + /// Instance id for blueprint protocol + pub instance_id: String, + /// Instance secret key for blueprint protocol + pub instance_key_pair: InstanceMsgKeyPair, + /// Local keypair for authentication + pub local_key: Keypair, + /// Address to listen on + pub listen_addr: Multiaddr, + /// Target number of peers to maintain + pub target_peer_count: u32, + /// Bootstrap peers to connect to + pub bootstrap_peers: Vec, + /// Whether to enable mDNS discovery + pub enable_mdns: bool, + /// Whether to enable Kademlia DHT + pub enable_kademlia: bool, +} + +pub struct NetworkService { + /// The libp2p swarm + swarm: Swarm, + /// Peer manager for tracking peer states + pub(crate) peer_manager: Arc, + /// Channel for sending messages to the network service + network_sender: Sender, + /// Channel for receiving messages from the network service + network_receiver: Receiver, + /// Channel for sending messages to the network service + protocol_message_sender: Sender, + /// Channel for receiving messages from the network service + protocol_message_receiver: Receiver, + /// Channel for sending events to the network service + event_sender: Sender, + /// Channel for receiving events from the network service + event_receiver: Receiver, + /// Network name/namespace + network_name: String, + /// Bootstrap peers + bootstrap_peers: HashSet, +} + +impl NetworkService { + /// Create a new network service + /// + /// # Errors + /// + /// * See [`GadgetBehaviour::new`] + /// * Bad `listen_addr` in the provided [`NetworkConfig`] + #[allow(clippy::missing_panics_doc)] // Unwrapping an Infallible + pub fn new( + config: NetworkConfig, + allowed_keys: HashSet, + // allowed_keys_rx: Receiver>, + ) -> Result { + let NetworkConfig { + network_name, + instance_id, + instance_key_pair, + local_key, + listen_addr, + target_peer_count, + bootstrap_peers, + enable_mdns: _, + enable_kademlia: _, + } = config; + + let peer_manager = Arc::new(PeerManager::new(allowed_keys)); + let blueprint_protocol_name = format!("{network_name}/{instance_id}"); + + let (network_sender, network_receiver) = crossbeam_channel::unbounded(); + let (protocol_message_sender, protocol_message_receiver) = crossbeam_channel::unbounded(); + let (event_sender, event_receiver) = crossbeam_channel::unbounded(); + + // Create the swarm + let behaviour = GadgetBehaviour::new( + &network_name, + &blueprint_protocol_name, + &local_key, + &instance_key_pair, + target_peer_count, + peer_manager.clone(), + protocol_message_sender.clone(), + )?; + + let mut swarm = SwarmBuilder::with_existing_identity(local_key) + .with_tokio() + .with_tcp( + libp2p::tcp::Config::default().nodelay(true), + libp2p::noise::Config::new, + libp2p::yamux::Config::default, + )? + .with_quic_config(|mut config| { + config.handshake_timeout = Duration::from_secs(30); + config + }) + .with_dns()? 
+ .with_behaviour(|_| behaviour) + .unwrap() + .build(); + + swarm + .behaviour_mut() + .blueprint_protocol + .subscribe(&blueprint_protocol_name)?; + + // Start listening + swarm.listen_on(listen_addr)?; + let bootstrap_peers = bootstrap_peers.into_iter().collect(); + + Ok(Self { + swarm, + peer_manager, + network_sender, + network_receiver, + protocol_message_sender, + protocol_message_receiver, + event_sender, + event_receiver, + network_name, + bootstrap_peers, + }) + } + + /// Get a sender to send messages to the network service + pub fn network_sender(&self) -> Sender { + self.network_sender.clone() + } + + pub fn start(self) -> NetworkServiceHandle { + let local_peer_id = *self.swarm.local_peer_id(); + let network_sender = self.network_sender.clone(); + let protocol_message_receiver = self.protocol_message_receiver.clone(); + + // Create handle with new interface + let handle = NetworkServiceHandle::new( + local_peer_id, + self.swarm + .behaviour() + .blueprint_protocol + .blueprint_protocol_name + .clone(), + self.peer_manager.clone(), + network_sender, + protocol_message_receiver, + ); + + // Add our own peer ID to the peer manager with all listening addresses + let mut info = PeerInfo::default(); + for addr in self.swarm.listeners() { + info.addresses.insert(addr.clone()); + } + self.peer_manager.update_peer(local_peer_id, info); + + // Spawn background task + tokio::spawn(async move { + Box::pin(self.run()).await; + }); + + handle + } + + /// Run the network service + async fn run(mut self) { + info!("Starting network service"); + + // Bootstrap with Kademlia + if let Err(e) = self.swarm.behaviour_mut().bootstrap() { + warn!("Failed to bootstrap with Kademlia: {}", e); + } + + // Connect to bootstrap peers + for addr in &self.bootstrap_peers { + debug!("Dialing bootstrap peer at {}", addr); + if let Err(e) = self.swarm.dial(addr.clone()) { + warn!("Failed to dial bootstrap peer: {}", e); + } + } + + loop { + tokio::select! { + swarm_event = self.swarm.select_next_some() => { + match swarm_event { + SwarmEvent::NewListenAddr { address, .. 
} => { + info!("New listen address: {}", address); + let local_peer_id = *self.swarm.local_peer_id(); + let mut info = self.peer_manager.get_peer_info(&local_peer_id) + .unwrap_or_default(); + info.addresses.insert(address.clone()); + self.peer_manager.update_peer(local_peer_id, info); + }, + SwarmEvent::Behaviour(event) => { + if let Err(e) = handle_behaviour_event( + &mut self.swarm, + &self.peer_manager, + event, + &self.event_sender, + ) + .await + { + warn!("Failed to handle swarm event: {}", e); + } + }, + _ => {} + } + } + Ok(msg) = async { self.network_receiver.try_recv() } => { + if let Err(e) = handle_network_message( + &mut self.swarm, + msg, + &self.peer_manager, + &self.event_sender, + ) + { + warn!("Failed to handle network message: {}", e); + } + } + else => break, + } + } + + info!("Network service stopped"); + } + + /// Get the current listening address + pub fn get_listen_addr(&self) -> Option { + self.swarm.listeners().next().cloned() + } +} + +/// Handle a swarm event +async fn handle_swarm_event( + swarm: &mut Swarm, + peer_manager: &Arc, + event: SwarmEvent, + event_sender: &Sender, +) -> Result<(), Error> { + if let SwarmEvent::Behaviour(behaviour_event) = event { + handle_behaviour_event(swarm, peer_manager, behaviour_event, event_sender).await?; + } + + Ok(()) +} + +/// Handle a behaviour event +async fn handle_behaviour_event( + swarm: &mut Swarm, + peer_manager: &Arc, + event: GadgetBehaviourEvent, + event_sender: &Sender, +) -> Result<(), Error> { + match event { + GadgetBehaviourEvent::ConnectionLimits(_) => {} + GadgetBehaviourEvent::Discovery(discovery_event) => { + handle_discovery_event(swarm, peer_manager, discovery_event, event_sender)?; + } + GadgetBehaviourEvent::BlueprintProtocol(blueprint_event) => { + handle_blueprint_protocol_event(swarm, peer_manager, blueprint_event, event_sender)?; + } + GadgetBehaviourEvent::Ping(ping_event) => { + handle_ping_event(swarm, peer_manager, ping_event, event_sender)?; + } + } + + Ok(()) +} + +/// Handle a discovery event +fn handle_discovery_event( + swarm: &mut Swarm, + peer_manager: &Arc, + event: DiscoveryEvent, + event_sender: &Sender, +) -> Result<(), Error> { + match event { + DiscoveryEvent::PeerConnected(peer_id) => { + info!("Peer connected, {peer_id}"); + // Update peer info when connected + if let Some(info) = swarm.behaviour().discovery.peer_info.get(&peer_id) { + peer_manager.update_peer(peer_id, info.clone()); + } + event_sender.send(NetworkEvent::PeerConnected(peer_id))?; + } + DiscoveryEvent::PeerDisconnected(peer_id) => { + info!("Peer disconnected, {peer_id}"); + peer_manager.remove_peer(&peer_id, "disconnected"); + event_sender.send(NetworkEvent::PeerDisconnected(peer_id))?; + } + DiscoveryEvent::Discovery(discovery_event) => match &*discovery_event { + DerivedDiscoveryBehaviourEvent::Identify(identify::Event::Received { + peer_id, + info, + .. 
+ }) => { + info!(%peer_id, "Received identify event"); + let protocols: HashSet = info + .protocols + .iter() + .map(std::string::ToString::to_string) + .collect(); + + trace!(%peer_id, ?protocols, "Supported protocols"); + + let blueprint_protocol_name = + &swarm.behaviour().blueprint_protocol.blueprint_protocol_name; + if !protocols.contains(blueprint_protocol_name) { + warn!(%peer_id, %blueprint_protocol_name, "Peer does not support required protocol"); + peer_manager.ban_peer_with_default_duration(*peer_id, "protocol unsupported"); + return Ok(()); + } + + // Get existing peer info or create new one + let mut peer_info = peer_manager.get_peer_info(peer_id).unwrap_or_default(); + + // Update identify info + peer_info.identify_info = Some(info.clone()); + + trace!(%peer_id, listen_addrs=?info.listen_addrs, "Adding identify addresses"); + // Add all addresses from identify info + for addr in &info.listen_addrs { + peer_info.addresses.insert(addr.clone()); + } + + trace!(%peer_id, "Updating peer info with identify information"); + peer_manager.update_peer(*peer_id, peer_info); + debug!(%peer_id, "Successfully processed identify information"); + } + DerivedDiscoveryBehaviourEvent::Identify(_) => { + // Ignore other identify events + } + DerivedDiscoveryBehaviourEvent::Kademlia(kad::Event::OutboundQueryProgressed { + result: kad::QueryResult::GetClosestPeers(Ok(ok)), + .. + }) => { + // Process newly discovered peers + for peer_info in &ok.peers { + if !peer_manager.get_peers().contains_key(&peer_info.peer_id) { + info!(%peer_info.peer_id, "Newly discovered peer from Kademlia"); + let info = PeerInfo::default(); + peer_manager.update_peer(peer_info.peer_id, info); + let addrs: Vec<_> = peer_info.addrs.clone(); + for addr in addrs { + debug!(%peer_info.peer_id, %addr, "Dialing peer from Kademlia"); + if let Err(e) = swarm.dial(DialOpts::from(addr)) { + warn!("Failed to dial address: {}", e); + } + } + } + } + } + DerivedDiscoveryBehaviourEvent::Mdns(mdns::Event::Discovered(list)) => { + // Add newly discovered peers from mDNS + for (peer_id, addr) in list { + if !peer_manager.get_peers().contains_key(peer_id) { + info!(%peer_id, %addr, "Newly discovered peer from Mdns"); + let mut info = PeerInfo::default(); + info.addresses.insert(addr.clone()); + peer_manager.update_peer(*peer_id, info); + debug!(%peer_id, %addr, "Dialing peer from Mdns"); + if let Err(e) = swarm.dial(DialOpts::from(addr.clone())) { + warn!("Failed to dial address: {}", e); + } + } + } + } + _ => {} + }, + } + + Ok(()) +} + +/// Handle a blueprint event +fn handle_blueprint_protocol_event( + _swarm: &mut Swarm, + _peer_manager: &Arc, + event: BlueprintProtocolEvent, + event_sender: &Sender, +) -> Result<(), Error> { + match event { + BlueprintProtocolEvent::Request { + peer, + request, + channel: _, + } => event_sender.send(NetworkEvent::InstanceRequestInbound { peer, request })?, + BlueprintProtocolEvent::Response { + peer, + response, + request_id: _, + } => event_sender.send(NetworkEvent::InstanceResponseInbound { peer, response })?, + BlueprintProtocolEvent::GossipMessage { + source, + topic, + message, + } => event_sender.send(NetworkEvent::GossipReceived { + source, + topic: topic.to_string(), + message, + })?, + } + + Ok(()) +} + +/// Handle a ping event +fn handle_ping_event( + _swarm: &mut Swarm, + _peer_manager: &Arc, + event: ping::Event, + event_sender: &Sender, +) -> Result<(), Error> { + match event.result { + Ok(rtt) => { + trace!( + "PingSuccess::Ping rtt to {} is {} ms", + event.peer, + rtt.as_millis() + 
); + } + Err(ping::Failure::Unsupported) => { + debug!(peer=%event.peer, "Ping protocol unsupported"); + } + Err(ping::Failure::Timeout) => { + debug!("Ping timeout: {}", event.peer); + } + Err(ping::Failure::Other { error }) => { + debug!("Ping failure: {error}"); + } + } + + Ok(()) +} + +/// Handle a network message +fn handle_network_message( + swarm: &mut Swarm, + msg: NetworkMessage, + peer_manager: &Arc, + event_sender: &Sender, +) -> Result<(), Error> { + match msg { + NetworkMessage::InstanceRequest { peer, request } => { + // Only send requests to verified peers + if !peer_manager.is_peer_verified(&peer) { + warn!(%peer, "Attempted to send request to unverified peer"); + return Ok(()); + } + + debug!(%peer, ?request, "Sending instance request"); + swarm + .behaviour_mut() + .blueprint_protocol + .send_request(&peer, request.clone()); + event_sender.send(NetworkEvent::InstanceRequestOutbound { peer, request })?; + } + NetworkMessage::GossipMessage { + source, + topic, + message, + } => { + debug!(%source, %topic, "Publishing gossip message"); + if let Err(e) = swarm + .behaviour_mut() + .blueprint_protocol + .publish(&topic, message.clone()) + { + warn!(%source, %topic, "Failed to publish gossip message: {:?}", e); + return Ok(()); + } + event_sender.send(NetworkEvent::GossipSent { topic, message })?; + } + } + + Ok(()) +} diff --git a/crates/networking/src/service_handle.rs b/crates/networking/src/service_handle.rs new file mode 100644 index 000000000..6f62832a7 --- /dev/null +++ b/crates/networking/src/service_handle.rs @@ -0,0 +1,185 @@ +use crate::types::MessageRouting; +use crate::{ + blueprint_protocol::InstanceMessageRequest, + discovery::{PeerInfo, PeerManager}, + service::NetworkMessage, + types::ProtocolMessage, +}; +use crossbeam_channel::{self, Receiver, Sender}; +use libp2p::{Multiaddr, PeerId}; +use std::sync::Arc; +use tokio::task::JoinHandle; +use tracing::debug; + +/// Handle for sending outgoing messages to the network +#[derive(Clone)] +pub struct NetworkSender { + network_message_sender: Sender, +} + +impl NetworkSender { + #[must_use] + pub fn new(network_message_sender: Sender) -> Self { + Self { + network_message_sender, + } + } + + /// Send a protocol message over the network + pub fn send_message(&self, message: NetworkMessage) -> Result<(), String> { + self.network_message_sender + .send(message) + .map_err(|e| e.to_string()) + } +} + +/// Handle for receiving incoming messages from the network +pub struct NetworkReceiver { + protocol_message_receiver: Receiver, +} + +impl NetworkReceiver { + #[must_use] + pub fn new(protocol_message_receiver: Receiver) -> Self { + Self { + protocol_message_receiver, + } + } + + /// Get the next protocol message + pub fn try_recv(&self) -> Result { + self.protocol_message_receiver.try_recv() + } +} + +/// Combined handle for the network service +pub struct NetworkServiceHandle { + pub local_peer_id: PeerId, + pub blueprint_protocol_name: Arc, + pub sender: NetworkSender, + pub receiver: NetworkReceiver, + pub peer_manager: Arc, +} + +impl Clone for NetworkServiceHandle { + fn clone(&self) -> Self { + Self { + local_peer_id: self.local_peer_id, + blueprint_protocol_name: self.blueprint_protocol_name.clone(), + sender: self.sender.clone(), + receiver: NetworkReceiver::new(self.receiver.protocol_message_receiver.clone()), + peer_manager: self.peer_manager.clone(), + } + } +} + +impl NetworkServiceHandle { + #[must_use] + pub fn new( + local_peer_id: PeerId, + blueprint_protocol_name: String, + peer_manager: Arc, + 
+        network_message_sender: Sender<NetworkMessage>,
+        protocol_message_receiver: Receiver<ProtocolMessage>,
+    ) -> Self {
+        Self {
+            local_peer_id,
+            blueprint_protocol_name: Arc::from(blueprint_protocol_name),
+            sender: NetworkSender::new(network_message_sender),
+            receiver: NetworkReceiver::new(protocol_message_receiver),
+            peer_manager,
+        }
+    }
+
+    pub fn next_protocol_message(&mut self) -> Option<ProtocolMessage> {
+        self.receiver.try_recv().ok()
+    }
+
+    #[must_use]
+    pub fn peers(&self) -> Vec<PeerId> {
+        self.peer_manager
+            .get_peers()
+            .clone()
+            .into_read_only()
+            .iter()
+            .map(|(peer_id, _)| *peer_id)
+            .collect()
+    }
+
+    #[must_use]
+    pub fn peer_info(&self, peer_id: &PeerId) -> Option<PeerInfo> {
+        self.peer_manager.get_peer_info(peer_id)
+    }
+
+    pub fn send(&self, routing: MessageRouting, message: impl Into<Vec<u8>>) -> Result<(), String> {
+        let protocol_message = ProtocolMessage {
+            protocol: self.blueprint_protocol_name.clone().to_string(),
+            routing,
+            payload: message.into(),
+        };
+
+        let raw_payload = bincode::serialize(&protocol_message).map_err(|err| err.to_string())?;
+        match protocol_message.routing.recipient {
+            Some(recipient) => {
+                let instance_message_request = InstanceMessageRequest::Protocol {
+                    protocol: self.blueprint_protocol_name.clone().to_string(),
+                    payload: raw_payload,
+                    metadata: None,
+                };
+
+                // Without a public key we cannot resolve the recipient, so the message is dropped
+                let Some(public_key) = recipient.public_key else {
+                    return Ok(());
+                };
+
+                let Some(peer_id) = self.peer_manager.get_peer_id_from_public_key(&public_key)
+                else {
+                    return Ok(());
+                };
+
+                self.send_network_message(NetworkMessage::InstanceRequest {
+                    peer: peer_id,
+                    request: instance_message_request,
+                })?;
+                debug!("Sent outbound p2p `NetworkMessage` to {:?}", peer_id);
+            }
+            None => {
+                let gossip_message = NetworkMessage::GossipMessage {
+                    source: self.local_peer_id,
+                    topic: self.blueprint_protocol_name.clone().to_string(),
+                    message: raw_payload,
+                };
+                self.send_network_message(gossip_message)?;
+                debug!("Sent outbound gossip `NetworkMessage`");
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn send_network_message(&self, message: NetworkMessage) -> Result<(), String> {
+        self.sender.send_message(message)
+    }
+
+    #[must_use]
+    pub fn get_listen_addr(&self) -> Option<Multiaddr> {
+        // Get the first peer info for our local peer ID
+        if let Some(peer_info) = self.peer_manager.get_peer_info(&self.local_peer_id) {
+            // Return the first address from our peer info
+            peer_info.addresses.iter().next().cloned()
+        } else {
+            None
+        }
+    }
+
+    /// Split the handle into separate sender and receiver
+    #[must_use]
+    pub fn split(self) -> (NetworkSender, NetworkReceiver) {
+        (self.sender, self.receiver)
+    }
+}
+
+/// Bundles the `JoinHandle` for the background service task so a caller can await its completion if needed.
+pub struct NetworkServiceTaskHandle {
+    /// The join handle for the background service task.
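+    ///
+    /// Illustrative sketch only (not part of this patch): a caller that started the
+    /// service might keep this join handle around for shutdown while polling a
+    /// `NetworkServiceHandle`. The `handle` and `task` names below are assumed for
+    /// the example.
+    ///
+    /// ```ignore
+    /// // `handle`: a mutable NetworkServiceHandle, `task`: a NetworkServiceTaskHandle
+    /// if let Some(msg) = handle.next_protocol_message() {
+    ///     println!("{} ({} bytes)", msg.routing, msg.payload.len());
+    /// }
+    /// // On shutdown, stop the background service task.
+    /// task.service_task.abort();
+    /// ```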
+ pub service_task: JoinHandle<()>, +} diff --git a/crates/networking/src/setup.rs b/crates/networking/src/setup.rs index 0dee64c84..e69de29bb 100644 --- a/crates/networking/src/setup.rs +++ b/crates/networking/src/setup.rs @@ -1,334 +0,0 @@ -#![allow(unused_results, missing_docs)] - -use crate::error::Error; -use crate::gossip::{ - GossipHandle, IntraNodePayload, MyBehaviour, NetworkServiceWithoutSwarm, MAX_MESSAGE_SIZE, -}; -pub use crate::key_types::GossipMsgKeyPair; -use futures::StreamExt; -use gadget_std as std; -use gadget_std::collections::BTreeMap; -use gadget_std::format; -use gadget_std::io; -use gadget_std::net::IpAddr; -use gadget_std::str::FromStr; -use gadget_std::string::String; -use gadget_std::sync::atomic::AtomicUsize; -use gadget_std::sync::Arc; -use gadget_std::time::Duration; -use gadget_std::vec; -use gadget_std::vec::Vec; -use libp2p::Multiaddr; -use libp2p::{ - gossipsub, gossipsub::IdentTopic, kad::store::MemoryStore, mdns, request_response, - swarm::dial_opts::DialOpts, StreamProtocol, -}; -use lru_mem::LruCache; -use tokio::select; -use tokio::sync::{Mutex, RwLock}; -use tokio::task::{spawn, JoinHandle}; - -/// The version of the gadget sdk -pub const AGENT_VERSION: &str = "tangle/gadget-sdk/1.0.0"; -/// The version of the client -pub const CLIENT_VERSION: &str = "1.0.0"; - -/// The base network configuration for a blueprint's `libp2p` network. -/// -/// This configuration is used to setup the `libp2p` network for a blueprint. -/// Construct using [`NetworkConfig::new`] for advanced users or -/// [`NetworkConfig::new_service_network`] ordinarily. -pub struct NetworkConfig { - pub identity: libp2p::identity::Keypair, - pub secret_key: GossipMsgKeyPair, - pub bootnodes: Vec, - pub bind_port: u16, - pub topics: Vec, -} - -impl gadget_std::fmt::Debug for NetworkConfig { - fn fmt(&self, f: &mut gadget_std::fmt::Formatter<'_>) -> gadget_std::fmt::Result { - f.debug_struct("NetworkConfig") - .field("identity", &self.identity) - .field("bootnodes", &self.bootnodes) - .field("bind_port", &self.bind_port) - .field("topics", &self.topics) - .finish_non_exhaustive() - } -} - -impl NetworkConfig { - /// For advanced use only. Use `NetworkConfig::new_service_network` for ordinary use. - /// This function allows for the creation of a network with multiple topics. - #[must_use] - pub fn new( - identity: libp2p::identity::Keypair, - secret_key: GossipMsgKeyPair, - bootnodes: Vec, - bind_port: u16, - topics: Vec, - ) -> Self { - Self { - identity, - secret_key, - bootnodes, - bind_port, - topics, - } - } - - /// When constructing a network for a single service, the service name is used as the network name. - /// Each service within a blueprint must have a unique network name. - pub fn new_service_network>( - identity: libp2p::identity::Keypair, - secret_key: GossipMsgKeyPair, - bootnodes: Vec, - bind_port: u16, - service_name: T, - ) -> Self { - Self::new( - identity, - secret_key, - bootnodes, - bind_port, - vec![service_name.into()], - ) - } -} - -/// Start a P2P network with the given configuration. -/// -/// Each service will only have one network. It is necessary that each service calling this function -/// uses a distinct network name, otherwise, the network will not be able to distinguish between -/// the different services. -/// -/// # Arguments -/// -/// * `config` - The network configuration. -/// -/// # Errors -/// -/// Returns an error if the network setup fails. 
-pub fn start_p2p_network(config: NetworkConfig) -> Result { - if config.topics.len() != 1 { - return Err(Error::TooManyTopics(config.topics.len())); - } - - let (networks, _) = multiplexed_libp2p_network(config)?; - let network = networks.into_iter().next().ok_or(Error::NoNetworkFound)?.1; - Ok(network) -} - -pub type NetworkResult = Result<(BTreeMap, JoinHandle<()>), Error>; - -#[allow(clippy::collapsible_else_if, clippy::too_many_lines)] -/// Starts the multiplexed libp2p network with the given configuration. -/// -/// # Arguments -/// -/// * `config` - The network configuration. -/// -/// # Errors -/// -/// Returns an error if the network setup fails. -/// -/// # Panics -/// -/// Panics if the network name is invalid. -pub fn multiplexed_libp2p_network(config: NetworkConfig) -> NetworkResult { - // Setup both QUIC (UDP) and TCP transports the increase the chances of NAT traversal - - use gadget_std::collections::BTreeMap; - gadget_logging::trace!("Building P2P Network with config: {config:?}"); - let NetworkConfig { - identity, - bootnodes, - bind_port, - topics, - secret_key, - } = config; - - // Ensure all topics are unique - let topics_unique = topics - .iter() - .cloned() - .collect::>() - .into_iter() - .collect::>(); - - if topics_unique.len() != topics.len() { - return Err(Error::DuplicateTopics); - } - - let networks = topics; - - let my_pk = secret_key.public(); - let my_id = identity.public().to_peer_id(); - - let mut swarm = libp2p::SwarmBuilder::with_existing_identity(identity) - .with_tokio() - .with_tcp( - libp2p::tcp::Config::default().nodelay(true), // Allow port reuse for TCP-hole punching - libp2p::noise::Config::new, - libp2p::yamux::Config::default, - )? - .with_quic_config(|mut config| { - config.handshake_timeout = Duration::from_secs(30); - config - }) - .with_dns()? - .with_behaviour(|key| { - // Set a custom gossipsub configuration - let gossipsub_config = gossipsub::ConfigBuilder::default() - .protocol_id_prefix("/tangle/gadget-binary-sdk/meshsub") - .max_transmit_size(MAX_MESSAGE_SIZE) - .validate_messages() - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) - .build() - .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. 
- - // Setup gossipsub network behaviour for broadcasting - let gossipsub = gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(key.clone()), - gossipsub_config, - )?; - - // Setup mDNS for peer discovery - let mdns = - mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id())?; - - // Setup request-response for direct messaging - let p2p_config = request_response::Config::default(); - // StreamProtocols MUST begin with a forward slash - let protocols = networks - .iter() - .map(|n| { - ( - StreamProtocol::try_from_owned(n.clone()).expect("Invalid network name"), - request_response::ProtocolSupport::Full, - ) - }) - .collect::>(); - - let p2p = request_response::Behaviour::new(protocols, p2p_config); - - // Setup the identify protocol for peers to exchange information about each other, a requirement for kadmelia DHT - let identify = libp2p::identify::Behaviour::new( - libp2p::identify::Config::new(CLIENT_VERSION.into(), key.public()) - .with_agent_version(AGENT_VERSION.into()), - ); - - // Setup kadmelia for DHT for peer discovery over a larger network - let memory_db = MemoryStore::new(key.public().to_peer_id()); - let kadmelia = libp2p::kad::Behaviour::new(key.public().to_peer_id(), memory_db); - - // Setup dcutr for upgrading existing connections to use relay against the bootnodes when necessary - // This also provided hole-punching capabilities to attempt to seek a direct connection, and fallback to relaying - // otherwise. - // dcutr = direct connection upgrade through relay - let dcutr = libp2p::dcutr::Behaviour::new(key.public().to_peer_id()); - - // Setup relay for using the dcutr-upgraded connections to relay messages for other peers when required - let relay_config = libp2p::relay::Config::default(); - let relay = libp2p::relay::Behaviour::new(key.public().to_peer_id(), relay_config); - - // Setup ping for liveness checks between connections - let ping = libp2p::ping::Behaviour::new(libp2p::ping::Config::default()); - - Ok(MyBehaviour { - gossipsub, - mdns, - p2p, - identify, - kadmelia, - dcutr, - relay, - ping, - }) - })? 
- .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) - .build(); - - gadget_logging::trace!("~~~ Starting P2P Network Setup Phase 1 ~~~"); - - // Subscribe to all networks - let mut inbound_mapping = Vec::new(); - let (tx_to_outbound, mut rx_to_outbound) = - tokio::sync::mpsc::unbounded_channel::(); - let public_key_to_libp2p_id = Arc::new(RwLock::new(BTreeMap::new())); - let mut handles_ret = BTreeMap::new(); - let connected_peers = Arc::new(AtomicUsize::new(0)); - for network in networks { - let topic = IdentTopic::new(network.clone()); - swarm.behaviour_mut().gossipsub.subscribe(&topic)?; - let (inbound_tx, inbound_rx) = tokio::sync::mpsc::unbounded_channel(); - inbound_mapping.push((topic.clone(), inbound_tx, connected_peers.clone())); - - handles_ret.insert( - network, - GossipHandle { - connected_peers: connected_peers.clone(), - topic, - tx_to_outbound: tx_to_outbound.clone(), - rx_from_inbound: Arc::new(Mutex::new(inbound_rx)), - public_key_to_libp2p_id: public_key_to_libp2p_id.clone(), - // Each key is 32 bytes, therefore 512 messages hashes can be stored in the set - recent_messages: LruCache::new(16 * 1024).into(), - my_id: my_pk, - }, - ); - } - - gadget_logging::trace!("~~~ Starting P2P Network Setup Phase 2 ~~~"); - - let ips_to_bind_to = [ - IpAddr::from_str("::").unwrap(), // IN_ADDR_ANY_V6 - IpAddr::from_str("0.0.0.0").unwrap(), // IN_ADDR_ANY_V4 - ]; - - for addr in ips_to_bind_to { - let ip_label = if addr.is_ipv4() { "ip4" } else { "ip6" }; - // Bind to both UDP and TCP to increase probability of successful NAT traversal. - // Use QUIC over UDP to have reliable ordered transport like TCP. - swarm.listen_on(format!("/{ip_label}/{addr}/udp/{bind_port}/quic-v1").parse()?)?; - swarm.listen_on(format!("/{ip_label}/{addr}/tcp/{bind_port}").parse()?)?; - } - - gadget_logging::trace!("~~~ Starting P2P Network Setup Phase 3 ~~~"); - // Dial all bootnodes - for bootnode in &bootnodes { - swarm.dial( - DialOpts::unknown_peer_id() - .address(bootnode.clone()) - .build(), - )?; - } - - let worker = async move { - let span = tracing::debug_span!("network_worker"); - let _enter = span.enter(); - let service = NetworkServiceWithoutSwarm { - inbound_mapping: &inbound_mapping, - connected_peers, - public_key_to_libp2p_id, - secret_key: &secret_key, - span: tracing::debug_span!(parent: &span, "network_service"), - my_id, - }; - - loop { - select! 
{ - // Setup outbound channel - Some(msg) = rx_to_outbound.recv() => { - service.with_swarm(&mut swarm).handle_intra_node_payload(msg); - } - event = swarm.select_next_some() => { - service.with_swarm(&mut swarm).handle_swarm_event(event).await; - } - } - } - }; - - let spawn_handle = spawn(worker); - Ok((handles_ret, spawn_handle)) -} diff --git a/crates/networking/src/test_helpers.rs b/crates/networking/src/test_helpers.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/crates/networking/src/test_helpers.rs @@ -0,0 +1 @@ + diff --git a/crates/networking/src/tests/blueprint_protocol.rs b/crates/networking/src/tests/blueprint_protocol.rs new file mode 100644 index 000000000..040ee064d --- /dev/null +++ b/crates/networking/src/tests/blueprint_protocol.rs @@ -0,0 +1,564 @@ +// TODO +// use gadget_crypto::KeyType; +// use libp2p::PeerId; +// use serde::{Deserialize, Serialize}; +// use std::{collections::HashSet, time::Duration}; +// use tokio::time::timeout; +// use tracing::info; +// +// use crate::{ +// blueprint_protocol::{InstanceMessageRequest, InstanceMessageResponse}, +// key_types::{Curve, InstanceMsgKeyPair, InstanceMsgPublicKey}, +// service::NetworkMessage, +// service_handle::NetworkServiceHandle, +// tests::{ +// create_whitelisted_nodes, wait_for_all_handshakes, wait_for_handshake_completion, TestNode, +// }, +// types::{MessageRouting, ParticipantId, ParticipantInfo, ProtocolMessage}, +// }; +// +// const TEST_TIMEOUT: Duration = Duration::from_secs(10); +// const PROTOCOL_NAME: &str = "summation/1.0.0"; +// +// // Protocol message types +// #[derive(Debug, Clone, Serialize, Deserialize)] +// enum SummationMessage { +// Number(u64), +// Verification { sum: u64 }, +// } +// +// // Helper to create a protocol message +// fn create_protocol_message( +// protocol: &str, +// msg: SummationMessage, +// sender: &NetworkServiceHandle, +// recipient: Option, +// ) -> ProtocolMessage { +// ProtocolMessage { +// protocol: protocol.to_string(), +// routing: MessageRouting { +// message_id: 0, +// round_id: 0, +// sender: ParticipantInfo { +// id: ParticipantId(0), +// public_key: None, +// }, +// recipient: recipient.map(|peer_id| ParticipantInfo { +// id: ParticipantId(0), +// public_key: None, +// }), +// }, +// payload: bincode::serialize(&msg).expect("Failed to serialize message"), +// } +// } +// +// // Helper to extract number from message +// fn extract_number_from_message(msg: &ProtocolMessage) -> u64 { +// match bincode::deserialize::(&msg.payload).expect("Failed to deserialize") { +// SummationMessage::Number(n) => n, +// _ => panic!("Expected number message"), +// } +// } +// +// // Helper to extract sum from verification message +// fn extract_sum_from_verification(msg: &ProtocolMessage) -> u64 { +// match bincode::deserialize::(&msg.payload).expect("Failed to deserialize") { +// SummationMessage::Verification { sum } => sum, +// _ => panic!("Expected verification message"), +// } +// } +// +// #[tokio::test] +// async fn test_summation_protocol_basic() { +// super::init_tracing(); +// info!("Starting summation protocol test"); +// +// // Create nodes with whitelisted keys +// let instance_key_pair2 = Curve::generate_with_seed(None).unwrap(); +// let mut allowed_keys1 = HashSet::new(); +// allowed_keys1.insert(instance_key_pair2.public()); +// +// let mut node1 = TestNode::new("test-net", "sum-test", allowed_keys1, vec![]).await; +// +// let mut allowed_keys2 = HashSet::new(); +// allowed_keys2.insert(node1.instance_key_pair.public()); +// let mut node2 = 
TestNode::new_with_keys( +// "test-net", +// "sum-test", +// allowed_keys2, +// vec![], +// Some(instance_key_pair2), +// None, +// ) +// .await; +// +// info!("Starting nodes"); +// let mut handle1 = node1.start().await.expect("Failed to start node1"); +// let mut handle2 = node2.start().await.expect("Failed to start node2"); +// +// info!("Waiting for handshake completion"); +// wait_for_handshake_completion(&handle1, &handle2, TEST_TIMEOUT).await; +// +// // Generate test numbers +// let num1 = 42; +// let num2 = 58; +// let expected_sum = num1 + num2; +// +// info!("Sending numbers via gossip"); +// // Send numbers via gossip +// handle1 +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Number(num1), +// &handle1, +// None, +// )) +// .expect("Failed to send number from node1"); +// +// handle2 +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Number(num2), +// &handle2, +// None, +// )) +// .expect("Failed to send number from node2"); +// +// info!("Waiting for messages to be processed"); +// // Wait for messages and compute sums +// let mut sum1 = 0; +// let mut sum2 = 0; +// let mut node1_received = false; +// let mut node2_received = false; +// +// timeout(TEST_TIMEOUT, async { +// loop { +// // Process incoming messages +// if let Some(msg) = handle1.next_protocol_message() { +// if !node1_received { +// sum1 += extract_number_from_message(&msg); +// node1_received = true; +// } +// } +// if let Some(msg) = handle2.next_protocol_message() { +// if !node2_received { +// sum2 += extract_number_from_message(&msg); +// node2_received = true; +// } +// } +// +// // Check if both nodes have received messages +// if node1_received && node2_received { +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for summation completion"); +// +// info!("Verifying sums via P2P messages"); +// // Verify sums via P2P messages +// handle1 +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Verification { sum: sum1 }, +// &handle1, +// Some(node2.peer_id), +// )) +// .expect("Failed to send verification from node1"); +// +// handle2 +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Verification { sum: sum2 }, +// &handle2, +// Some(node1.peer_id), +// )) +// .expect("Failed to send verification from node2"); +// +// info!("Waiting for verification messages"); +// // Wait for verification messages +// timeout(TEST_TIMEOUT, async { +// let mut node1_verified = false; +// let mut node2_verified = false; +// +// loop { +// // Process verification messages +// if let Some(msg) = handle1.next_protocol_message() { +// if !node1_verified { +// assert_eq!(extract_sum_from_verification(&msg), expected_sum); +// node1_verified = true; +// } +// } +// if let Some(msg) = handle2.next_protocol_message() { +// if !node2_verified { +// assert_eq!(extract_sum_from_verification(&msg), expected_sum); +// node2_verified = true; +// } +// } +// +// if node1_verified && node2_verified { +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for verification completion"); +// +// info!("Summation protocol test completed successfully"); +// } +// +// #[tokio::test] +// async fn test_summation_protocol_multi_node() { +// super::init_tracing(); +// info!("Starting multi-node summation protocol test"); +// +// // Create 3 nodes with whitelisted keys +// info!("Creating whitelisted 
nodes"); +// let mut nodes = create_whitelisted_nodes(3).await; +// info!("Created {} nodes successfully", nodes.len()); +// +// // Start all nodes +// info!("Starting all nodes"); +// let mut handles = Vec::new(); +// for (i, node) in nodes.iter_mut().enumerate() { +// info!("Starting node {}", i); +// handles.push(node.start().await.expect("Failed to start node")); +// info!("Node {} started successfully", i); +// } +// +// // Convert handles to mutable references +// info!("Converting handles to mutable references"); +// let mut handles: Vec<&mut NetworkServiceHandle> = handles.iter_mut().collect(); +// let handles_len = handles.len(); +// info!("Converted {} handles", handles_len); +// +// // Wait for all handshakes to complete +// info!( +// "Waiting for handshake completion between {} nodes", +// handles_len +// ); +// wait_for_all_handshakes(&handles, TEST_TIMEOUT).await; +// info!("All handshakes completed successfully"); +// +// // Generate test numbers +// let numbers = vec![42, 58, 100]; +// let expected_sum: u64 = numbers.iter().sum(); +// info!( +// "Generated test numbers: {:?}, expected sum: {}", +// numbers, expected_sum +// ); +// +// info!("Sending numbers via gossip"); +// // Each node broadcasts its number +// for (i, handle) in handles.iter().enumerate() { +// info!("Node {} broadcasting number {}", i, numbers[i]); +// handle +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Number(numbers[i]), +// handle, +// None, +// )) +// .expect("Failed to send number"); +// info!("Node {} successfully broadcast its number", i); +// // Add a small delay between broadcasts to avoid message collisions +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// +// info!("Waiting for messages to be processed"); +// // Wait for all nodes to receive all numbers +// let mut sums = vec![0; handles_len]; +// let mut received = vec![0; handles_len]; +// +// timeout(TEST_TIMEOUT, async { +// loop { +// for (i, handle) in handles.iter_mut().enumerate() { +// if let Some(msg) = handle.next_protocol_message() { +// if received[i] < handles_len - 1 { +// let num = extract_number_from_message(&msg); +// sums[i] += num; +// received[i] += 1; +// info!( +// "Node {} received number {}, total sum: {}, received count: {}", +// i, num, sums[i], received[i] +// ); +// } +// } +// } +// +// let all_received = received.iter().all(|&r| r == handles_len - 1); +// info!( +// "Current received counts: {:?}, target count: {}", +// received, +// handles_len - 1 +// ); +// if all_received { +// info!("All nodes have received all numbers"); +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for summation completion"); +// +// info!("Verifying sums via P2P messages"); +// info!("Final sums: {:?}", sums); +// // Each node verifies with every other node +// for (i, sender) in handles.iter().enumerate() { +// for (j, recipient) in handles.iter().enumerate() { +// if i != j { +// info!( +// "Node {} sending verification sum {} to node {}", +// i, sums[i], j +// ); +// sender +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Verification { sum: sums[i] }, +// sender, +// Some(recipient.local_peer_id), +// )) +// .expect("Failed to send verification"); +// } +// } +// } +// +// info!("Waiting for verification messages"); +// // Wait for all verifications +// timeout(TEST_TIMEOUT, async { +// let mut verified = vec![0; handles_len]; +// loop { +// for (i, handle) in 
handles.iter_mut().enumerate() { +// if let Some(msg) = handle.next_protocol_message() { +// if verified[i] < handles_len - 1 { +// let sum = extract_sum_from_verification(&msg); +// info!( +// "Node {} received verification sum {}, expected {}", +// i, sum, expected_sum +// ); +// assert_eq!(sum, expected_sum); +// verified[i] += 1; +// info!("Node {} verification count: {}", i, verified[i]); +// } +// } +// } +// +// let all_verified = verified.iter().all(|&v| v == handles_len - 1); +// info!("Current verification counts: {:?}", verified); +// if all_verified { +// info!("All nodes have verified all sums"); +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for verification completion"); +// +// info!("Multi-node summation protocol test completed successfully"); +// } +// +// #[tokio::test] +// async fn test_summation_protocol_late_join() { +// super::init_tracing(); +// info!("Starting late join summation protocol test"); +// +// // Create 3 nodes but only start 2 initially +// let mut nodes = create_whitelisted_nodes(3).await; +// +// // Start first two nodes +// let mut handles = Vec::new(); +// for node in nodes[..2].iter_mut() { +// handles.push(node.start().await.expect("Failed to start node")); +// } +// +// // Convert handles to mutable references +// let handles_refs: Vec<&mut NetworkServiceHandle> = handles.iter_mut().collect(); +// +// // Wait for initial handshakes +// info!("Waiting for initial handshake completion"); +// wait_for_all_handshakes(&handles_refs, TEST_TIMEOUT).await; +// +// // Initial nodes send their numbers +// let numbers = vec![42, 58, 100]; +// let _expected_sum: u64 = numbers.iter().sum(); +// +// info!("Initial nodes sending numbers"); +// for (i, handle) in handles.iter().enumerate() { +// handle +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Number(numbers[i]), +// handle, +// None, +// )) +// .expect("Failed to send number"); +// } +// +// // Wait for initial nodes to process messages +// timeout(TEST_TIMEOUT, async { +// let mut received = vec![false; 2]; +// loop { +// for (i, handle) in handles.iter_mut().enumerate() { +// if let Some(msg) = handle.next_protocol_message() { +// if !received[i] { +// assert_eq!(extract_number_from_message(&msg), numbers[1 - i]); +// received[i] = true; +// } +// } +// } +// if received.iter().all(|&r| r) { +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for initial summation"); +// +// // Start the late joining node +// info!("Starting late joining node"); +// handles.push(nodes[2].start().await.expect("Failed to start late node")); +// let all_handles: Vec<&mut NetworkServiceHandle> = handles.iter_mut().collect(); +// +// // Wait for the new node to complete handshakes +// wait_for_all_handshakes(&all_handles, TEST_TIMEOUT).await; +// +// // Late node sends its number and receives history +// info!("Late node sending number and receiving history"); +// handles[2] +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Number(numbers[2]), +// &handles[2], +// None, +// )) +// .expect("Failed to send number from late node"); +// +// // Verify final state +// timeout(TEST_TIMEOUT, async { +// let mut verified = vec![false; handles.len()]; +// loop { +// for (i, handle) in handles.iter_mut().enumerate() { +// if let Some(msg) = handle.next_protocol_message() { +// let num = extract_number_from_message(&msg); +// if 
!verified[i] && (num == numbers[2] || numbers.contains(&num)) { +// verified[i] = true; +// } +// } +// } +// if verified.iter().all(|&v| v) { +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for late node synchronization"); +// +// info!("Late join test completed successfully"); +// } +// +// #[tokio::test] +// async fn test_summation_protocol_node_disconnect() { +// super::init_tracing(); +// info!("Starting node disconnect test"); +// +// // Create 3 nodes +// let mut nodes = create_whitelisted_nodes(3).await; +// +// // Start all nodes +// let mut handles = Vec::new(); +// for node in nodes.iter_mut() { +// handles.push(node.start().await.expect("Failed to start node")); +// } +// +// // Convert handles to mutable references +// let handles_refs: Vec<&mut NetworkServiceHandle> = handles.iter_mut().collect(); +// +// // Wait for all handshakes +// wait_for_all_handshakes(&handles_refs, TEST_TIMEOUT).await; +// +// // Send initial numbers +// let numbers = vec![42, 58, 100]; +// for (i, handle) in handles.iter().enumerate() { +// handle +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Number(numbers[i]), +// handle, +// None, +// )) +// .expect("Failed to send number"); +// } +// +// // Wait for initial processing +// timeout(TEST_TIMEOUT, async { +// let mut received = vec![0; handles.len()]; +// loop { +// for (i, handle) in handles.iter_mut().enumerate() { +// if let Some(msg) = handle.next_protocol_message() { +// if received[i] < 2 { +// extract_number_from_message(&msg); +// received[i] += 1; +// } +// } +// } +// if received.iter().all(|&r| r == 2) { +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for initial messages"); +// +// // Disconnect one node +// info!("Disconnecting node"); +// drop(handles.pop()); +// let mut remaining_handles: Vec<&mut NetworkServiceHandle> = handles.iter_mut().collect(); +// +// // Verify remaining nodes can still communicate +// info!("Verifying remaining nodes can communicate"); +// for (i, sender) in remaining_handles.iter().enumerate() { +// for (j, recipient) in remaining_handles.iter().enumerate() { +// if i != j { +// sender +// .send(create_protocol_message( +// PROTOCOL_NAME, +// SummationMessage::Verification { sum: numbers[i] }, +// sender, +// Some(recipient.local_peer_id), +// )) +// .expect("Failed to send verification"); +// } +// } +// } +// +// // Wait for verification messages +// timeout(TEST_TIMEOUT, async { +// let mut verified = vec![false; remaining_handles.len()]; +// loop { +// for (i, handle) in remaining_handles.iter_mut().enumerate() { +// if let Some(msg) = handle.next_protocol_message() { +// if !verified[i] { +// extract_sum_from_verification(&msg); +// verified[i] = true; +// } +// } +// } +// if verified.iter().all(|&v| v) { +// break; +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// } +// }) +// .await +// .expect("Timeout waiting for verification after disconnect"); +// +// info!("Node disconnect test completed successfully"); +// } diff --git a/crates/networking/src/tests/discovery.rs b/crates/networking/src/tests/discovery.rs new file mode 100644 index 000000000..90c3f5efd --- /dev/null +++ b/crates/networking/src/tests/discovery.rs @@ -0,0 +1,140 @@ +use super::init_tracing; +use super::TestNode; +use super::{wait_for_peer_discovery, wait_for_peer_info}; +use crate::service::NetworkMessage; +use 
std::{collections::HashSet, time::Duration}; +use tokio::time::timeout; +use tracing::{debug, info}; +use tracing_subscriber::{fmt, EnvFilter}; + +#[tokio::test] +async fn test_peer_discovery_mdns() { + init_tracing(); + + let network_name = "test-network"; + let instance_id = "test-instance"; + let allowed_keys = HashSet::new(); + + // Create two nodes + let mut node1 = TestNode::new(network_name, instance_id, allowed_keys.clone(), vec![]).await; + let mut node2 = TestNode::new(network_name, instance_id, allowed_keys, vec![]).await; + + // Start both nodes and wait for them to be listening + let handle1 = node1.start().await.expect("Failed to start node1"); + let handle2 = node2.start().await.expect("Failed to start node2"); + + // First wait for basic peer discovery (they see each other) + let discovery_timeout = Duration::from_secs(20); + wait_for_peer_discovery(&[&handle1, &handle2], discovery_timeout) + .await + .expect("Basic peer discovery timed out"); +} + +#[tokio::test] +async fn test_peer_discovery_kademlia() { + init_tracing(); + + let network_name = "test-network"; + let instance_id = "test-instance"; + let allowed_keys = HashSet::new(); + + // Create the first node (bootstrap node) + let mut node1 = TestNode::new(network_name, instance_id, allowed_keys.clone(), vec![]).await; + + // Start node1 and get its listening address + let handle1 = node1.start().await.expect("Failed to start node1"); + let node1_addr = node1.get_listen_addr().expect("Node1 should be listening"); + + // Create two more nodes that will bootstrap from node1 + let bootstrap_peers = vec![node1_addr.clone()]; + let mut node2 = TestNode::new( + network_name, + instance_id, + allowed_keys.clone(), + bootstrap_peers.clone(), + ) + .await; + let mut node3 = TestNode::new(network_name, instance_id, allowed_keys, bootstrap_peers).await; + + // Start the remaining nodes + let handle2 = node2.start().await.expect("Failed to start node2"); + let handle3 = node3.start().await.expect("Failed to start node3"); + + // Wait for peer discovery through Kademlia DHT + let discovery_timeout = Duration::from_secs(20); + match timeout(discovery_timeout, async { + loop { + let peers1 = handle1.peers(); + let peers2 = handle2.peers(); + let peers3 = handle3.peers(); + + if peers1.contains(&node2.peer_id) + && peers1.contains(&node3.peer_id) + && peers2.contains(&node1.peer_id) + && peers2.contains(&node3.peer_id) + && peers3.contains(&node1.peer_id) + && peers3.contains(&node2.peer_id) + { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + { + Ok(_) => println!("All peers discovered each other through Kademlia"), + Err(_) => panic!("Kademlia peer discovery timed out"), + } +} + +#[tokio::test] +async fn test_peer_info_updates() { + init_tracing(); + + let network_name = "test-network"; + let instance_id = "test-instance"; + let allowed_keys = HashSet::new(); + + info!("Creating test nodes..."); + // Create two nodes + let mut node1 = TestNode::new(network_name, instance_id, allowed_keys.clone(), vec![]).await; + let mut node2 = TestNode::new(network_name, instance_id, allowed_keys, vec![]).await; + + info!("Starting node1..."); + let handle1 = node1.start().await.expect("Failed to start node1"); + info!("Node1 started successfully"); + + info!("Starting node2..."); + let handle2 = node2.start().await.expect("Failed to start node2"); + info!("Node2 started successfully"); + + info!("Both nodes started, waiting for peer discovery..."); + + // First wait for basic peer discovery (they see each 
other) + let discovery_timeout = Duration::from_secs(30); // Increased timeout + match wait_for_peer_discovery(&[&handle1, &handle2], discovery_timeout).await { + Ok(_) => info!("Peer discovery successful"), + Err(e) => { + // Log peer states before failing + info!("Node1 peers: {:?}", handle1.peers()); + info!("Node2 peers: {:?}", handle2.peers()); + panic!("Peer discovery failed: {}", e); + } + } + + info!("Peers discovered each other, waiting for identify info..."); + + // Now wait for identify info to be populated + let identify_timeout = Duration::from_secs(30); // Increased timeout + wait_for_peer_info(&handle1, &handle2, identify_timeout).await; + + info!("Test completed successfully - both nodes have identify info"); + + // Log final state + if let Some(info) = handle1.peer_info(&handle2.local_peer_id) { + info!("Node1's info about Node2: {:?}", info); + } + if let Some(info) = handle2.peer_info(&handle1.local_peer_id) { + info!("Node2's info about Node1: {:?}", info); + } +} diff --git a/crates/networking/src/tests/gossip.rs b/crates/networking/src/tests/gossip.rs new file mode 100644 index 000000000..c3240f533 --- /dev/null +++ b/crates/networking/src/tests/gossip.rs @@ -0,0 +1,199 @@ +use super::{init_tracing, wait_for_handshake_completion, TestNode}; +use crate::{ + key_types::{Curve, InstanceMsgPublicKey}, + service_handle::NetworkServiceHandle, + tests::{create_whitelisted_nodes, wait_for_all_handshakes}, + types::{MessageRouting, ParticipantId, ParticipantInfo, ProtocolMessage}, +}; +use gadget_crypto::KeyType; +use std::{collections::HashSet, time::Duration}; +use tokio::time::timeout; +use tracing::info; + +const TEST_TIMEOUT: Duration = Duration::from_secs(10); +const PROTOCOL_NAME: &str = "/blueprint_protocol/gossip-test/1.0.0"; + +#[tokio::test] +async fn test_gossip_between_verified_peers() { + init_tracing(); + info!("Starting gossip test between verified peers"); + + // Create nodes with whitelisted keys + let instance_key_pair2 = Curve::generate_with_seed(None).unwrap(); + let mut allowed_keys1 = HashSet::new(); + allowed_keys1.insert(instance_key_pair2.public()); + + let mut node1 = TestNode::new("test-net", "gossip-test", allowed_keys1, vec![]).await; + + let mut allowed_keys2 = HashSet::new(); + allowed_keys2.insert(node1.instance_key_pair.public()); + let mut node2 = TestNode::new_with_keys( + "test-net", + "gossip-test", + allowed_keys2, + vec![], + Some(instance_key_pair2), + None, + ) + .await; + + info!("Starting nodes"); + let mut handle1 = node1.start().await.expect("Failed to start node1"); + let mut handle2 = node2.start().await.expect("Failed to start node2"); + + info!("Waiting for handshake completion"); + wait_for_handshake_completion(&handle1, &handle2, TEST_TIMEOUT).await; + + // Create test message + info!("Sending gossip message from node1"); + + let test_payload = b"Hello, gossip network!".to_vec(); + let routing = MessageRouting { + message_id: 1, + round_id: 0, + sender: ParticipantInfo { + id: ParticipantId(1), + public_key: Some(node1.instance_key_pair.public()), + }, + recipient: None, // No specific recipient for gossip + }; + + handle1 + .send(routing, test_payload.clone()) + .expect("Failed to send gossip message"); + + info!("Waiting for node2 to receive the message"); + // Wait for node2 to receive the message + let received_message = timeout(TEST_TIMEOUT, async { + loop { + if let Some(msg) = handle2.next_protocol_message() { + dbg!(&msg); + if msg.protocol == PROTOCOL_NAME { + return msg; + } + } + 
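+            // No matching protocol message yet; back off briefly before polling again.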
tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Timeout waiting for gossip message"); + + // Verify message contents + assert_eq!(received_message.payload, test_payload); + assert_eq!(received_message.protocol, PROTOCOL_NAME); + assert_eq!(received_message.routing.message_id, 1); + assert_eq!(received_message.routing.round_id, 0); + assert_eq!( + received_message.routing.sender.public_key, + Some(node1.instance_key_pair.public()) + ); + assert!(received_message.routing.recipient.is_none()); + + info!("Gossip test completed successfully"); +} + +#[tokio::test] +async fn test_multi_node_gossip() { + init_tracing(); + info!("Starting multi-node gossip test"); + + // Create three nodes with all keys whitelisted + let mut nodes = create_whitelisted_nodes(3).await; + + info!("Starting all nodes"); + let mut handles: Vec<_> = Vec::new(); + for node in nodes.iter_mut() { + handles.push(node.start().await.expect("Failed to start node")); + } + + info!("Waiting for all handshakes to complete"); + let handles_refs: Vec<&mut NetworkServiceHandle> = handles.iter_mut().collect(); + wait_for_all_handshakes(&handles_refs, TEST_TIMEOUT).await; + + // Create test message + let test_payload = b"Multi-node gossip test".to_vec(); + let routing = MessageRouting { + message_id: 1, + round_id: 0, + sender: ParticipantInfo { + id: ParticipantId(0), + public_key: Some(nodes[0].instance_key_pair.public()), + }, + recipient: None, + }; + + info!("Sending gossip message from node 0"); + handles[0] + .send(routing, test_payload.clone()) + .expect("Failed to send gossip message"); + + info!("Waiting for all nodes to receive the message"); + // Wait for all other nodes to receive the message + timeout(TEST_TIMEOUT, async { + for (i, handle) in handles.iter_mut().enumerate().skip(1) { + let received = loop { + if let Some(msg) = handle.next_protocol_message() { + if msg.protocol == PROTOCOL_NAME { + break msg; + } + } + tokio::time::sleep(Duration::from_millis(100)).await; + }; + + assert_eq!( + received.payload, test_payload, + "Node {} received wrong payload", + i + ); + assert_eq!(received.protocol, PROTOCOL_NAME); + assert_eq!( + received.routing.sender.public_key, + Some(nodes[0].instance_key_pair.public()) + ); + info!("Node {} received the gossip message correctly", i); + } + }) + .await + .expect("Timeout waiting for gossip messages"); + + info!("Multi-node gossip test completed successfully"); +} + +#[tokio::test] +async fn test_unverified_peer_gossip() { + init_tracing(); + info!("Starting unverified peer gossip test"); + + // Create two nodes with no whitelisted keys + let mut node1 = TestNode::new("test-net", "gossip-test", HashSet::new(), vec![]).await; + let mut node2 = TestNode::new("test-net", "gossip-test", HashSet::new(), vec![]).await; + + info!("Starting nodes"); + let mut handle1 = node1.start().await.expect("Failed to start node1"); + let mut handle2 = node2.start().await.expect("Failed to start node2"); + + // Create test message + let test_payload = b"This message should not be received".to_vec(); + let routing = MessageRouting { + message_id: 1, + round_id: 0, + sender: ParticipantInfo { + id: ParticipantId(1), + public_key: Some(node1.instance_key_pair.public()), + }, + recipient: None, + }; + + info!("Attempting to send gossip message from unverified node"); + handle1 + .send(routing, test_payload.clone()) + .expect("Failed to send gossip message"); + + // Wait a bit to ensure message is not received + tokio::time::sleep(Duration::from_secs(2)).await; + + // 
Verify node2 did not receive the message + assert!(handle2.next_protocol_message().is_none()); + + info!("Unverified peer gossip test completed successfully"); +} diff --git a/crates/networking/src/tests/handshake.rs b/crates/networking/src/tests/handshake.rs new file mode 100644 index 000000000..dbe01d714 --- /dev/null +++ b/crates/networking/src/tests/handshake.rs @@ -0,0 +1,133 @@ +use crate::{ + blueprint_protocol::{InstanceMessageRequest, InstanceMessageResponse}, + key_types::{Curve, InstanceMsgKeyPair, InstanceMsgPublicKey}, + service::NetworkMessage, + service_handle::NetworkServiceHandle, + tests::TestNode, +}; +use gadget_crypto::KeyType; +use std::{collections::HashSet, time::Duration}; +use tokio::time::timeout; +use tracing::{debug, info}; +use tracing_subscriber::{fmt, EnvFilter}; + +const TEST_TIMEOUT: Duration = Duration::from_secs(5); + +fn init_tracing() { + let _ = fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_target(true) + .with_thread_ids(true) + .with_file(true) + .with_line_number(true) + .try_init(); +} + +#[tokio::test] +async fn test_automatic_handshake() { + init_tracing(); + info!("Starting automatic handshake test"); + + let network_name = "test-network"; + let instance_id = "test-instance"; + + // Generate node2's key pair first + let instance_key_pair2 = Curve::generate_with_seed(None).unwrap(); + let mut allowed_keys1 = HashSet::new(); + allowed_keys1.insert(instance_key_pair2.public()); + + // Create node1 with node2's key whitelisted + let mut node1 = TestNode::new(network_name, instance_id, allowed_keys1, vec![]).await; + + // Create node2 with node1's key whitelisted and pre-generated key + let mut allowed_keys2 = HashSet::new(); + allowed_keys2.insert(node1.instance_key_pair.public()); + let mut node2 = TestNode::new_with_keys( + network_name, + instance_id, + allowed_keys2, + vec![], + Some(instance_key_pair2), + None, + ) + .await; + + info!("Starting nodes"); + // Start both nodes - this should trigger automatic handshake + let handle1 = node1.start().await.expect("Failed to start node1"); + let handle2 = node2.start().await.expect("Failed to start node2"); + + // Wait for automatic handshake completion + info!("Waiting for automatic handshake completion"); + timeout(TEST_TIMEOUT, async { + loop { + if handle1.peer_manager.is_peer_verified(&node2.peer_id) + && handle2.peer_manager.is_peer_verified(&node1.peer_id) + { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Automatic handshake verification timed out"); + + // Verify peer info and identify info are present + let peer_info1 = handle1 + .peer_info(&node2.peer_id) + .expect("Missing peer info for node2"); + let peer_info2 = handle2 + .peer_info(&node1.peer_id) + .expect("Missing peer info for node1"); + + assert!( + peer_info1.identify_info.is_some(), + "Missing identify info for node2" + ); + assert!( + peer_info2.identify_info.is_some(), + "Missing identify info for node1" + ); + + info!("Automatic handshake test completed successfully"); +} + +#[tokio::test] +async fn test_handshake_with_invalid_peer() { + init_tracing(); + info!("Starting invalid peer handshake test"); + + let network_name = "test-network"; + let instance_id = "test-instance"; + + // Create node1 with empty whitelist + let mut node1 = TestNode::new(network_name, instance_id, HashSet::new(), vec![]).await; + + // Create node2 with node1's key whitelisted (but node2's key is not whitelisted by node1) + let mut allowed_keys2 = HashSet::new(); + 
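+    // Node1 never whitelists node2's key, so node1 is expected to ban node2 once the handshake fails.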
allowed_keys2.insert(node1.instance_key_pair.public()); + let mut node2 = TestNode::new(network_name, instance_id, allowed_keys2, vec![]).await; + + info!("Starting nodes"); + let handle1 = node1.start().await.expect("Failed to start node1"); + let handle2 = node2.start().await.expect("Failed to start node2"); + + // Wait for ban to be applied automatically + info!("Waiting for automatic ban"); + timeout(TEST_TIMEOUT, async { + loop { + if handle1.peer_manager.is_banned(&node2.peer_id) { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Ban was not applied"); + + // Verify peers remain unverified + assert!(!handle1.peer_manager.is_peer_verified(&node2.peer_id)); + assert!(!handle2.peer_manager.is_peer_verified(&node1.peer_id)); + + info!("Invalid peer handshake test completed successfully"); +} diff --git a/crates/networking/src/tests/mod.rs b/crates/networking/src/tests/mod.rs new file mode 100644 index 000000000..ddb306077 --- /dev/null +++ b/crates/networking/src/tests/mod.rs @@ -0,0 +1,350 @@ +use crate::service::NetworkMessage; +use crate::{ + key_types::{Curve, InstanceMsgKeyPair, InstanceMsgPublicKey}, + service_handle::NetworkServiceHandle, + NetworkConfig, NetworkService, +}; +use gadget_crypto::KeyType; +use libp2p::{ + identity::{self, Keypair}, + Multiaddr, PeerId, +}; +use std::{collections::HashSet, time::Duration}; +use tokio::time::timeout; +use tracing::info; + +mod blueprint_protocol; +mod discovery; +mod gossip; +mod handshake; + +fn init_tracing() { + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .with_target(true) + .with_thread_ids(true) + .with_file(true) + .with_line_number(true) + .try_init(); +} + +/// Test node configuration for network tests +pub struct TestNode { + pub service: Option, + pub peer_id: PeerId, + pub listen_addr: Option, + pub instance_key_pair: InstanceMsgKeyPair, + pub local_key: Keypair, +} + +impl TestNode { + /// Create a new test node with auto-generated keys + pub async fn new( + network_name: &str, + instance_id: &str, + allowed_keys: HashSet, + bootstrap_peers: Vec, + ) -> Self { + Self::new_with_keys( + network_name, + instance_id, + allowed_keys, + bootstrap_peers, + None, + None, + ) + .await + } + + /// Create a new test node with specified keys + pub async fn new_with_keys( + network_name: &str, + instance_id: &str, + allowed_keys: HashSet, + bootstrap_peers: Vec, + instance_key_pair: Option, + local_key: Option, + ) -> Self { + let local_key = local_key.unwrap_or_else(|| identity::Keypair::generate_ed25519()); + let peer_id = local_key.public().to_peer_id(); + + // Bind to all interfaces instead of just localhost + let listen_addr: Multiaddr = "/ip4/0.0.0.0/tcp/0".parse().unwrap(); + info!("Creating test node {peer_id} with TCP address: {listen_addr}"); + + let instance_key_pair = + instance_key_pair.unwrap_or_else(|| Curve::generate_with_seed(None).unwrap()); + + let config = NetworkConfig { + network_name: network_name.to_string(), + instance_id: instance_id.to_string(), + instance_key_pair: instance_key_pair.clone(), + local_key: local_key.clone(), + listen_addr: listen_addr.clone(), + target_peer_count: 10, + bootstrap_peers, + enable_mdns: true, + enable_kademlia: true, + }; + + let service = + NetworkService::new(config, allowed_keys).expect("Failed to create network service"); + + Self { + service: Some(service), + peer_id, + listen_addr: None, + instance_key_pair, + local_key, + } + } + + /// Start the node and wait 
for it to be fully initialized + pub async fn start(&mut self) -> Result { + // Take ownership of the service + let service = self.service.take().ok_or("Service already started")?; + let handle = service.start(); + + // Wait for the node to be fully initialized + let timeout_duration = Duration::from_secs(10); // Increased timeout + match timeout(timeout_duration, async { + // First wait for the listening address + while self.listen_addr.is_none() { + if let Some(addr) = handle.get_listen_addr() { + info!("Node {} listening on {}", self.peer_id, addr); + self.listen_addr = Some(addr.clone()); + + // Extract port from multiaddr + let addr_str = addr.to_string(); + let port = addr_str.split("/").nth(4).unwrap_or("0").to_string(); + + // Try localhost first + let localhost_addr = format!("127.0.0.1:{}", port); + match tokio::net::TcpStream::connect(&localhost_addr).await { + Ok(_) => { + info!("Successfully verified localhost port for {}", self.peer_id); + break; + } + Err(e) => { + info!("Localhost port not ready for {}: {}", self.peer_id, e); + // Try external IP + let external_addr = format!("10.0.1.142:{}", port); + match tokio::net::TcpStream::connect(&external_addr).await { + Ok(_) => { + info!( + "Successfully verified external port for {}", + self.peer_id + ); + break; + } + Err(e) => { + info!("External port not ready for {}: {}", self.peer_id, e); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + } + } + } + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Give the node a moment to initialize protocols + tokio::time::sleep(Duration::from_millis(500)).await; + + Ok::<(), &'static str>(()) + }) + .await + { + Ok(Ok(_)) => { + info!("Node {} fully initialized", self.peer_id); + Ok(handle) + } + Ok(Err(e)) => Err(e), + Err(_) => Err("Timeout waiting for node to initialize"), + } + } + + /// Get the actual listening address + pub fn get_listen_addr(&self) -> Option { + self.listen_addr.clone() + } + + /// Update the allowed keys for this node + pub fn update_allowed_keys(&self, allowed_keys: HashSet) { + if let Some(service) = &self.service { + service.peer_manager.update_whitelisted_keys(allowed_keys); + } + } +} + +/// Wait for a condition with timeout +pub async fn wait_for_condition(timeout: Duration, mut condition: F) -> Result<(), &'static str> +where + F: FnMut() -> bool, +{ + let start = std::time::Instant::now(); + while !condition() { + if start.elapsed() > timeout { + return Err("Timeout waiting for condition"); + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + Ok(()) +} + +/// Wait for peers to discover each other +pub async fn wait_for_peer_discovery( + handles: &[&NetworkServiceHandle], + timeout: Duration, +) -> Result<(), &'static str> { + info!("Waiting for peer discovery..."); + + wait_for_condition(timeout, || { + for (i, handle1) in handles.iter().enumerate() { + for (j, handle2) in handles.iter().enumerate() { + if i != j + && !handle1 + .peers() + .iter() + .any(|id| *id == handle2.local_peer_id) + { + return false; + } + } + } + true + }) + .await +} + +/// Wait for peer info to be updated +pub async fn wait_for_peer_info( + handle1: &NetworkServiceHandle, + handle2: &NetworkServiceHandle, + timeout: Duration, +) { + info!("Waiting for identify info..."); + + match tokio::time::timeout(timeout, async { + loop { + let peer_info1 = handle1.peer_info(&handle2.local_peer_id); + let peer_info2 = handle2.peer_info(&handle1.local_peer_id); + + if let Some(peer_info) = peer_info1 { + if 
peer_info.identify_info.is_some() { + // Also verify reverse direction + if let Some(peer_info) = peer_info2 { + if peer_info.identify_info.is_some() { + info!("Identify info populated in both directions"); + break; + } + } + } + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + { + Ok(_) => info!("Peer info updated successfully in both directions"), + Err(_) => panic!("Peer info update timed out"), + } +} + +// Helper to wait for handshake completion between multiple nodes +async fn wait_for_all_handshakes(handles: &[&mut NetworkServiceHandle], timeout_length: Duration) { + info!("Starting handshake wait for {} nodes", handles.len()); + timeout(timeout_length, async { + loop { + let mut all_verified = true; + for (i, handle1) in handles.iter().enumerate() { + for (j, handle2) in handles.iter().enumerate() { + if i != j { + let verified = handle1 + .peer_manager + .is_peer_verified(&handle2.local_peer_id); + if !verified { + info!("Node {} -> Node {}: handshake not verified yet", i, j); + all_verified = false; + break; + } + } + } + if !all_verified { + break; + } + } + if all_verified { + info!("All handshakes completed successfully"); + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Handshake verification timed out"); +} + +// Helper to wait for handshake completion between two nodes +async fn wait_for_handshake_completion( + handle1: &NetworkServiceHandle, + handle2: &NetworkServiceHandle, + timeout_length: Duration, +) { + timeout(timeout_length, async { + loop { + if handle1 + .peer_manager + .is_peer_verified(&handle2.local_peer_id) + && handle2 + .peer_manager + .is_peer_verified(&handle1.local_peer_id) + { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Handshake verification timed out"); +} + +// Helper to create a whitelisted test node +async fn create_node_with_keys( + network: &str, + instance: &str, + allowed_keys: HashSet, + key_pair: Option, +) -> TestNode { + TestNode::new_with_keys(network, instance, allowed_keys, vec![], key_pair, None).await +} + +// Helper to create a set of nodes with whitelisted keys +async fn create_whitelisted_nodes(count: usize) -> Vec { + let mut nodes = Vec::with_capacity(count); + let mut key_pairs = Vec::with_capacity(count); + let mut allowed_keys = HashSet::new(); + + // Generate all key pairs first + for _ in 0..count { + let key_pair = Curve::generate_with_seed(None).unwrap(); + key_pairs.push(key_pair.clone()); + allowed_keys.insert(key_pair.public()); + } + + // Create nodes with whitelisted keys + for i in 0..count { + nodes.push( + create_node_with_keys( + "test-net", + "sum-test", + allowed_keys.clone(), + Some(key_pairs[i].clone()), + ) + .await, + ); + } + + nodes +} diff --git a/crates/networking/src/types.rs b/crates/networking/src/types.rs new file mode 100644 index 000000000..6c1e4f4f9 --- /dev/null +++ b/crates/networking/src/types.rs @@ -0,0 +1,114 @@ +use crate::key_types::InstanceMsgPublicKey; +use libp2p::{gossipsub::IdentTopic, PeerId}; +use serde::{Deserialize, Serialize}; +use std::fmt::Display; + +/// Maximum allowed size for a message payload +pub const MAX_MESSAGE_SIZE: usize = 16 * 1024 * 1024; + +/// Unique identifier for a participant in the network +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct ParticipantId(pub u16); + +impl From for u16 { + fn from(val: ParticipantId) -> Self { + val.0 + } +} + +/// Type of message delivery mechanism 
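+///
+/// Illustrative sketch (the topic string below is made up for the example): a
+/// broadcast delivery carries the gossipsub topic, while a direct delivery names
+/// the target peer:
+///
+/// ```ignore
+/// use libp2p::{gossipsub::IdentTopic, PeerId};
+///
+/// let broadcast = MessageDelivery::Broadcast { topic: IdentTopic::new("my-blueprint/1.0.0") };
+/// let direct = MessageDelivery::Direct { peer_id: PeerId::random() };
+/// ```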
+#[derive(Debug, Clone)]
+pub enum MessageDelivery {
+    /// Broadcast to all peers via gossipsub
+    Broadcast {
+        /// The topic to broadcast on
+        topic: IdentTopic,
+    },
+    /// Direct P2P message to a specific peer
+    Direct {
+        /// The target peer ID
+        peer_id: PeerId,
+    },
+}
+
+/// Message routing information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct MessageRouting {
+    /// Unique identifier for this message
+    pub message_id: u64,
+    /// The round/sequence number this message belongs to
+    pub round_id: u16,
+    /// The sender's information
+    pub sender: ParticipantInfo,
+    /// Optional recipient information for direct messages
+    pub recipient: Option<ParticipantInfo>,
+}
+
+/// Information about a participant in the network
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ParticipantInfo {
+    /// The participant's unique ID
+    pub id: ParticipantId,
+    /// The participant's public key (if known)
+    pub public_key: Option<InstanceMsgPublicKey>,
+}
+
+/// A protocol message that can be sent over the network
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ProtocolMessage {
+    /// The protocol name
+    pub protocol: String,
+    /// Routing information for the message
+    pub routing: MessageRouting,
+    /// The actual message payload
+    pub payload: Vec<u8>,
+}
+
+/// Internal representation of a message to be sent
+#[derive(Debug)]
+pub struct OutboundMessage {
+    /// The message to be sent
+    pub message: ProtocolMessage,
+    /// How the message should be delivered
+    pub delivery: MessageDelivery,
+}
+
+impl Display for ParticipantId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "P{}", self.0)
+    }
+}
+
+impl Display for MessageRouting {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "msg={} round={} from={} to={:?}",
+            self.message_id,
+            self.round_id,
+            self.sender.id,
+            self.recipient.as_ref().map(|r| r.id)
+        )
+    }
+}
+
+impl Display for ParticipantInfo {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{} key={}",
+            self.id,
+            if self.public_key.is_some() {
+                "yes"
+            } else {
+                "no"
+            }
+        )
+    }
+}
+
+impl Display for ProtocolMessage {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{} payload_size={}", self.routing, self.payload.len())
+    }
+}
diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml
index 64807d16a..c574ff830 100644
--- a/crates/sdk/Cargo.toml
+++ b/crates/sdk/Cargo.toml
@@ -44,9 +44,11 @@ tokio = { workspace = true, default-features = false }
 # Networking-related dependencies
 gadget-networking = { workspace = true, optional = true }
-gadget-client-networking = { workspace = true, optional = true }
 gadget-contexts = { workspace = true }
 gadget-context-derive = { workspace = true, optional = true }
 
+# Round-based protocol support
+gadget-networking-round-based-extension = { workspace = true, optional = true }
+
 # Optional dependencies for testing
 gadget-testing-utils = { workspace = true, optional = true }
 
@@ -82,10 +84,7 @@ web = [
     "gadget-macros?/web",
 ]
 
-macros = [
-    "dep:gadget-macros",
-    "dep:gadget-context-derive",
-]
+macros = ["dep:gadget-macros", "dep:gadget-context-derive"]
 
 build = ["dep:blueprint-metadata", "dep:blueprint-build-utils"]
 
@@ -97,7 +96,7 @@ tangle = [
     "gadget-macros?/tangle",
     "gadget-testing-utils?/tangle",
     "gadget-utils/tangle",
-    "gadget-event-listeners/tangle"
+    "gadget-event-listeners/tangle",
 ]
 
 evm = [
@@ -108,7 +107,7 @@ evm = [
     "gadget-utils/evm",
     "gadget-testing-utils?/anvil",
     "gadget-macros?/evm",
-    "gadget-event-listeners/evm"
+    "gadget-event-listeners/evm",
 ]
 
 eigenlayer = [
@@ -120,40 +119,28 @@ eigenlayer = [
     "gadget-testing-utils?/eigenlayer",
     "gadget-utils/eigenlayer",
     "gadget-macros?/eigenlayer",
-    "gadget-event-listeners/evm"
+    "gadget-event-listeners/evm",
 ]
 
-testing = [
-    "dep:gadget-testing-utils",
-    "dep:tempfile",
-    "std",
-]
+testing = ["dep:gadget-testing-utils", "dep:tempfile", "std"]
 
 networking = [
     "dep:gadget-networking",
-    "dep:gadget-client-networking",
     "gadget-contexts/networking",
-    "gadget-clients/networking",
     "gadget-keystore/std",
     "gadget-config/networking",
     "gadget-macros?/networking",
     "gadget-context-derive?/networking",
 ]
 
-networking-sp-core-ecdsa = [
-    "gadget-networking/sp-core-ecdsa"
-]
+networking-sp-core-ecdsa = ["gadget-networking/sp-core-ecdsa"]
 
-networking-sr25519 = [
-    "gadget-networking/sp-core-sr25519"
-]
+networking-sr25519 = ["gadget-networking/sp-core-sr25519"]
 
-networking-ed25519 = [
-    "gadget-networking/sp-core-ed25519"
-]
+networking-ed25519 = ["gadget-networking/sp-core-ed25519"]
 
 local-store = ["gadget-stores/local"]
 
-round-based-compat = ["gadget-networking/round-based-compat"]
+round-based-compat = ["dep:gadget-networking-round-based-extension"]
 
-cronjob = ["gadget-event-listeners/cronjob"]
\ No newline at end of file
+cronjob = ["gadget-event-listeners/cronjob"]
diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs
index d157535ba..9c31c4f19 100644
--- a/crates/sdk/src/lib.rs
+++ b/crates/sdk/src/lib.rs
@@ -33,6 +33,8 @@ pub mod build {
 pub mod networking {
     /// Networking utilities for blueprints
     pub use gadget_networking::*;
+    #[cfg(feature = "round-based-compat")]
+    pub use gadget_networking_round_based_extension as round_based_compat;
 }
 
 /// Event listener infrastructure for handling blueprint events
diff --git a/crates/testing-utils/tangle/src/harness.rs b/crates/testing-utils/tangle/src/harness.rs
index cd877dc54..0dba8a27c 100644
--- a/crates/testing-utils/tangle/src/harness.rs
+++ b/crates/testing-utils/tangle/src/harness.rs
@@ -348,7 +348,7 @@ impl TangleTestHarness {
                 service_id,
                 Job::from(job_id),
                 inputs,
-                0,
+                0, // TODO: Should this take a call ID? or leave it up to the caller to verify?
) .await .map_err(|e| Error::Setup(e.to_string()))?; diff --git a/crates/testing-utils/tangle/src/node/transactions.rs b/crates/testing-utils/tangle/src/node/transactions.rs index 9a4713fa5..2d3479076 100644 --- a/crates/testing-utils/tangle/src/node/transactions.rs +++ b/crates/testing-utils/tangle/src/node/transactions.rs @@ -198,7 +198,7 @@ pub async fn submit_job>( service_id: u64, job_id: Job, job_params: Args, - call_id: u64, + _call_id: u64, // TODO: Actually verify this ) -> Result { let call = api::tx().services().call(service_id, job_id, job_params); let events = client @@ -215,7 +215,6 @@ pub async fn submit_job>( if job_called.service_id == service_id && job_called.job == job_id && user.account_id() == job_called.caller - && job_called.call_id == call_id { return Ok(job_called); } diff --git a/flake.lock b/flake.lock index 63fc2a2c2..0bfa5f257 100644 --- a/flake.lock +++ b/flake.lock @@ -28,11 +28,11 @@ ] }, "locked": { - "lastModified": 1733130721, - "narHash": "sha256-FeL2dez6gE3/u+Dq5Ot4TA0EbonBRo2/7pTYNmBBTNY=", + "lastModified": 1738660302, + "narHash": "sha256-aLWyhJx2cO/M3/QLoDBpsObFfjC9e/VEN6HtaI0U6IA=", "owner": "shazow", "repo": "foundry.nix", - "rev": "277722b2fc52af44c0827c47f0f170c7a9b17b0e", + "rev": "33a209625b9e31227a5f11417e95a3ac7264d811", "type": "github" }, "original": { @@ -44,11 +44,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1733064805, - "narHash": "sha256-7NbtSLfZO0q7MXPl5hzA0sbVJt6pWxxtGWbaVUDDmjs=", + "lastModified": 1739863612, + "narHash": "sha256-UbtgxplOhFcyjBcNbTVO8+HUHAl/WXFDOb6LvqShiZo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "31d66ae40417bb13765b0ad75dd200400e98de84", + "rev": "632f04521e847173c54fa72973ec6c39a371211c", "type": "github" }, "original": { @@ -73,11 +73,11 @@ ] }, "locked": { - "lastModified": 1733106880, - "narHash": "sha256-aJmAIjZfWfPSWSExwrYBLRgXVvgF5LP1vaeUGOOIQ98=", + "lastModified": 1739932111, + "narHash": "sha256-WkayjH0vuGw0hx2gmjTUGFRvMKpM17gKcpL/U8EUUw0=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "e66c0d43abf5bdefb664c3583ca8994983c332ae", + "rev": "75b2271c5c087d830684cd5462d4410219acc367", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 1568804b4..59ec63992 100644 --- a/flake.nix +++ b/flake.nix @@ -19,10 +19,21 @@ }; }; - outputs = { self, nixpkgs, rust-overlay, foundry, flake-utils }: - flake-utils.lib.eachDefaultSystem (system: + outputs = + { + self, + nixpkgs, + rust-overlay, + foundry, + flake-utils, + }: + flake-utils.lib.eachDefaultSystem ( + system: let - overlays = [ (import rust-overlay) foundry.overlay ]; + overlays = [ + (import rust-overlay) + foundry.overlay + ]; pkgs = import nixpkgs { inherit system overlays; }; @@ -41,7 +52,6 @@ # Protocol Buffers pkgs.protobuf # Mold Linker for faster builds (only on Linux) - (lib.optionals pkgs.stdenv.isLinux pkgs.om4) (lib.optionals pkgs.stdenv.isLinux pkgs.mold) (lib.optionals pkgs.stdenv.isDarwin pkgs.darwin.apple_sdk.frameworks.Security) (lib.optionals pkgs.stdenv.isDarwin pkgs.darwin.apple_sdk.frameworks.SystemConfiguration) @@ -66,7 +76,13 @@ ]; # Environment variables RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; - LD_LIBRARY_PATH = lib.makeLibraryPath [ pkgs.gmp pkgs.libclang pkgs.openssl.dev pkgs.stdenv.cc.cc ]; + LD_LIBRARY_PATH = lib.makeLibraryPath [ + pkgs.gmp + pkgs.libclang + pkgs.openssl.dev + pkgs.stdenv.cc.cc + ]; }; - }); + } + ); }
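The handshake-wait and whitelisting helpers added in `crates/networking/src/tests/mod.rs` are building blocks for the individual test modules. Below is a minimal sketch of how they compose, assuming `TestNode::start()` is async and yields a `NetworkServiceHandle` in the same way the other tests in that module use it; the test name and the 30-second timeout are illustrative only, not taken from this patch.

```rust
#[tokio::test]
async fn whitelisted_pair_completes_handshake() {
    // Two nodes that whitelist each other's instance keys (helper from tests/mod.rs).
    let mut nodes = create_whitelisted_nodes(2).await;

    // Assumption: starting a TestNode yields its NetworkServiceHandle.
    let handle1 = nodes[0].start().await.expect("node 1 should start");
    let handle2 = nodes[1].start().await.expect("node 2 should start");

    // Blocks (with a timeout) until each side has verified the other's handshake.
    // For more than two nodes, wait_for_all_handshakes takes a slice of handles.
    wait_for_handshake_completion(&handle1, &handle2, Duration::from_secs(30)).await;
}
```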
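The types introduced in `crates/networking/src/types.rs` keep routing metadata (`MessageRouting`), the serialized payload (`ProtocolMessage`), and the transport choice (`MessageDelivery`) separate. A small sketch of building a direct message follows, assuming `types` is a public module of `gadget_networking`; the protocol name, participant IDs, and payload are placeholders.

```rust
use gadget_networking::types::{
    MAX_MESSAGE_SIZE, MessageDelivery, MessageRouting, OutboundMessage, ParticipantId,
    ParticipantInfo, ProtocolMessage,
};
use libp2p::PeerId;

/// Wrap an already-serialized payload as a direct message to `peer`.
fn direct_message_to(peer: PeerId, payload: Vec<u8>) -> OutboundMessage {
    // Oversized payloads should be rejected before they reach the service.
    debug_assert!(payload.len() <= MAX_MESSAGE_SIZE);

    OutboundMessage {
        message: ProtocolMessage {
            protocol: "example/rand/1.0.0".into(), // placeholder protocol name
            routing: MessageRouting {
                message_id: 1,
                round_id: 0,
                sender: ParticipantInfo {
                    id: ParticipantId(0),
                    public_key: None,
                },
                recipient: Some(ParticipantInfo {
                    id: ParticipantId(1),
                    public_key: None,
                }),
            },
            payload,
        },
        // Direct delivery targets one libp2p peer; Broadcast would take a gossipsub topic.
        delivery: MessageDelivery::Direct { peer_id: peer },
    }
}
```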
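On the SDK side, the `networking` feature no longer pulls in the removed `gadget-client-networking` crate, and `round-based-compat` now maps to the new `gadget-networking-round-based-extension` crate, re-exported as `round_based_compat` under the `networking` module. A sketch of the consumer-side imports, assuming the SDK is consumed as `gadget_sdk` with its `networking` and `round-based-compat` features enabled; the dependency declaration in the comment is illustrative, not copied from this patch.

```rust
// Cargo.toml of a downstream blueprint (illustrative):
//   gadget-sdk = { version = "*", features = ["networking", "round-based-compat"] }

// The full gadget-networking API is re-exported under `networking`.
use gadget_sdk::networking::types::ProtocolMessage;

// The round-based extension keeps a stable alias behind the feature gate.
use gadget_sdk::networking::round_based_compat;
```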
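`submit_job` in the Tangle testing utils now ignores the call ID it is given (the `job_called.call_id == call_id` check is removed), and the harness simply passes `0`. A test that still needs that guarantee can compare the returned event itself. The sketch below assumes the `JobCalled` event type and the harness's `Error::Setup(String)` variant from the surrounding code; tracking the expected call ID is left to the caller.

```rust
/// Check the call ID on a `JobCalled` event returned by `submit_job`,
/// since `submit_job` itself no longer performs this comparison.
fn check_call_id(job_called: &JobCalled, expected_call_id: u64) -> Result<(), Error> {
    if job_called.call_id != expected_call_id {
        return Err(Error::Setup(format!(
            "unexpected call ID: got {}, expected {}",
            job_called.call_id, expected_call_id
        )));
    }
    Ok(())
}
```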